Date:      Sat, 28 Apr 2012 00:12:24 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r234745 - head/sys/powerpc/aim
Message-ID:  <201204280012.q3S0COYL031472@svn.freebsd.org>

Author: nwhitehorn
Date: Sat Apr 28 00:12:23 2012
New Revision: 234745
URL: http://svn.freebsd.org/changeset/base/234745

Log:
  After switching mutexes to use lwsync, they no longer provide sufficient
  guarantees on acquire for the tlbie mutex. Conversely, the TLB invalidation
  sequence provides guarantees that do not need to be redundantly applied on
  release. Roll a small custom lock that is just right. Simultaneously,
  convert the SLB tree changes back to lwsync, as changing them to sync
  was a misdiagnosis of the tlbie barrier problem this commit actually fixes.
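  
  The pattern this describes (spin on an atomic compare-and-set, let
  isync act as the acquire barrier, and let the ptesync that already
  ends the tlbie sequence double as the release barrier) reduces to
  roughly the following sketch; the hobo_lock names are hypothetical,
  not the committed code:
  
  	static volatile u_int hobo_lock = 0;
  
  	static __inline void
  	hobo_lock_acquire(void)
  	{
  		/* Spin until the lock word flips from 0 to 1. */
  		while (!atomic_cmpset_int(&hobo_lock, 0, 1))
  			;
  		/* isync discards speculatively executed instructions,
  		 * so no access in the critical section can appear to
  		 * precede the lock acquisition. */
  		isync();
  	}
  
  	static __inline void
  	hobo_lock_release(void)
  	{
  		/* The tlbie sequence ends in ptesync, which orders
  		 * all prior storage accesses, so a plain store is a
  		 * sufficient release; no lwsync or sync is needed. */
  		hobo_lock = 0;
  	}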

Modified:
  head/sys/powerpc/aim/moea64_native.c
  head/sys/powerpc/aim/slb.c

Modified: head/sys/powerpc/aim/moea64_native.c
==============================================================================
--- head/sys/powerpc/aim/moea64_native.c	Fri Apr 27 23:39:21 2012	(r234744)
+++ head/sys/powerpc/aim/moea64_native.c	Sat Apr 28 00:12:23 2012	(r234745)
@@ -133,36 +133,31 @@ __FBSDID("$FreeBSD$");
 
 #define	VSID_HASH_MASK	0x0000007fffffffffULL
 
-/*
- * The tlbie instruction must be executed in 64-bit mode
- * so we have to twiddle MSR[SF] around every invocation.
- * Just to add to the fun, exceptions must be off as well
- * so that we can't trap in 64-bit mode. What a pain.
- */
-static struct mtx	tlbie_mutex;
-
 static __inline void
 TLBIE(uint64_t vpn) {
 #ifndef __powerpc64__
 	register_t vpn_hi, vpn_lo;
 	register_t msr;
-	register_t scratch;
+	register_t scratch, intr;
 #endif
 
+	static volatile u_int tlbie_lock = 0;
+
 	vpn <<= ADDR_PIDX_SHFT;
 	vpn &= ~(0xffffULL << 48);
 
+	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
+	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
+	isync(); /* Flush instruction queue once lock acquired */
+
 #ifdef __powerpc64__
-	mtx_lock(&tlbie_mutex);
 	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
-	mtx_unlock(&tlbie_mutex);
-	__asm __volatile("eieio; tlbsync; ptesync");
+	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
 #else
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
 
-	/* Note: spin mutex is to disable exceptions while fiddling MSR */
-	mtx_lock_spin(&tlbie_mutex);
+	intr = intr_disable();
 	__asm __volatile("\
 	    mfmsr %0; \
 	    mr %1, %0; \
@@ -179,8 +174,11 @@ TLBIE(uint64_t vpn) {
 	    ptesync;" 
 	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
 	    : "memory");
-	mtx_unlock_spin(&tlbie_mutex);
+	intr_restore(intr);
 #endif
+
+	/* No barriers or special ops -- taken care of by ptesync above */
+	tlbie_lock = 0;
 }
 
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
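
On 32-bit kernels the spin mutex's only remaining job was keeping
exceptions masked while MSR[SF] is toggled, so it is replaced by a
direct interrupt save/restore around the assembly. The idiom, with
the real work elided:

	register_t intr;

	intr = intr_disable();	/* mask interrupts; save previous MSR */
	/* ... set MSR[SF], tlbie, restore MSR; must not trap here ... */
	intr_restore(intr);	/* put interrupt state back as found */
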
@@ -261,9 +259,9 @@ moea64_pte_clear_native(mmu_t mmu, uintp
 	 * As shown in Section 7.6.3.2.3
 	 */
 	pt->pte_lo &= ~ptebit;
-	sched_pin();
+	critical_enter();
 	TLBIE(vpn);
-	sched_unpin();
+	critical_exit();
 }
 
 static void
@@ -297,12 +295,12 @@ moea64_pte_unset_native(mmu_t mmu, uintp
 	 * Invalidate the pte.
 	 */
 	isync();
-	sched_pin();
+	critical_enter();
 	pvo_pt->pte_hi &= ~LPTE_VALID;
 	pt->pte_hi &= ~LPTE_VALID;
 	PTESYNC();
 	TLBIE(vpn);
-	sched_unpin();
+	critical_exit();
 
 	/*
 	 * Save the reg & chg bits.
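
The callers also switch from sched_pin() to critical_enter(): unlike
a spin mutex, the hobo lock does not itself block preemption, and
sched_pin() only prevents migration to another CPU, so a preempted
lock holder could leave other CPUs spinning. The resulting usage
pattern, as in the simplest caller:

	critical_enter();	/* no preemption while the lock is held */
	TLBIE(vpn);		/* acquires and releases the hobo lock */
	critical_exit();
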
@@ -405,15 +403,6 @@ moea64_bootstrap_native(mmu_t mmup, vm_o
 
 	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
 
-	/*
-	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
-	 */
-#ifdef __powerpc64__
-	mtx_init(&tlbie_mutex, "tlbie", NULL, MTX_DEF);
-#else
-	mtx_init(&tlbie_mutex, "tlbie", NULL, MTX_SPIN);
-#endif
-
 	moea64_mid_bootstrap(mmup, kernelstart, kernelend);
 
 	/*

Modified: head/sys/powerpc/aim/slb.c
==============================================================================
--- head/sys/powerpc/aim/slb.c	Fri Apr 27 23:39:21 2012	(r234744)
+++ head/sys/powerpc/aim/slb.c	Sat Apr 28 00:12:23 2012	(r234745)
@@ -139,7 +139,7 @@ make_new_leaf(uint64_t esid, uint64_t sl
 	 * that a lockless searcher always sees a valid path through
 	 * the tree.
 	 */
-	powerpc_sync();
+	mb();
 
 	idx = esid2idx(esid, parent->ua_level);
 	parent->u.ua_child[idx] = child;
@@ -187,7 +187,7 @@ make_intermediate(uint64_t esid, struct 
 	idx = esid2idx(child->ua_base, inter->ua_level);
 	inter->u.ua_child[idx] = child;
 	setbit(&inter->ua_alloc, idx);
-	powerpc_sync();
+	mb();
 
 	/* Set up parent to point to intermediate node ... */
 	idx = esid2idx(inter->ua_base, parent->ua_level);
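
Both slb.c hunks above are the usual lockless-publication pattern:
fully initialize the new node, issue a store barrier, and only then
store the pointer that makes it reachable, so a concurrent lockless
searcher never follows a link to half-initialized fields.
Schematically, with simplified, hypothetical types:

	struct node {
		uint64_t	base;
		struct node	*child[16];
	};

	static void
	publish(struct node *parent, struct node *child, int idx)
	{
		/* All stores initializing *child must be visible
		 * before the store that links it into the tree. */
		mb();
		parent->child[idx] = child;
	}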


