Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 12 Jun 2010 01:45:29 +0000 (UTC)
From:      Marcel Moolenaar <marcel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r209085 - head/sys/ia64/ia64
Message-ID:  <201006120145.o5C1jT9R052484@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: marcel
Date: Sat Jun 12 01:45:29 2010
New Revision: 209085
URL: http://svn.freebsd.org/changeset/base/209085

Log:
  The ptc.g operation for the McKinley and Madison processors has the
  side-effect of purging more than the requested translation. While
  this is not a problem in general, it invalidates the assumption made
  during constructing the trapframe on entry into the kernel in SMP
  configurations. The assumption is that only the first store to the
  stack will possibly cause a TLB miss. Since the ptc.g purges the
  translation caches of all CPUs in the coherency domain, a ptc.g
  executed on one CPU can cause a purge on another CPU that is
  currently running the critical code that saves the state to the
  trapframe. This can cause an unexpected TLB miss and with interrupt
  collection disabled this means an unexpected data nested TLB fault.
  
  A data nested TLB fault will not save any context, nor provide a
  way for software to determine what caused the TLB miss nor where
  it occurred. Careful construction of the kernel entry and exit code
  allows us to handle a TLB miss at precisely orchestrated points,
  thereby avoiding the need to wire the kernel stack, but the
  unexpected TLB miss caused by the ptc.g instruction resulted in
  an unrecoverable condition, resulting in machine checks.
  
  The solution to this problem is to synchronize the kernel entry
  on all CPUs with the use of the ptc.g instruction on a single CPU
  by implementing a bare-bones readers-writer lock that allows N
  readers (= N CPUs entering the kernel) and 1 writer (= execution
  of the ptc.g instruction on some CPU). This solution wins over
  a rendezvous approach by not interrupting CPUs with an IPI.
  
  This problem has not been observed on the Montecito.
  
  PR:		ia64/147772
  MFC after:	6 days

Modified:
  head/sys/ia64/ia64/exception.S
  head/sys/ia64/ia64/pmap.c

Modified: head/sys/ia64/ia64/exception.S
==============================================================================
--- head/sys/ia64/ia64/exception.S	Sat Jun 12 00:28:53 2010	(r209084)
+++ head/sys/ia64/ia64/exception.S	Sat Jun 12 01:45:29 2010	(r209085)
@@ -170,6 +170,27 @@ ENTRY_NOPROFILE(exception_save, 0)
 	 *	r30,r31=trapframe pointers
 	 *	p14,p15=memory stack switch
 	 */
+
+	/* PTC.G enter non-exclusive */
+	mov	r24 = ar.ccv
+	movl	r25 = pmap_ptc_g_sem
+	;;
+.ptc_g_0:
+	ld8.acq	r26 = [r25]
+	;;
+	tbit.nz	p12, p0 = r26, 63
+(p12)	br.cond.spnt.few .ptc_g_0
+	;;
+	mov	ar.ccv = r26
+	adds	r27 = 1, r26
+	;;
+	cmpxchg8.rel	r27 = [r25], r27, ar.ccv
+	;;
+	cmp.ne	p12, p0 = r26, r27
+(p12)	br.cond.spnt.few .ptc_g_0
+	;;
+	mov	ar.ccv = r24
+
 exception_save_restart:
 {	.mmi
 	st8		[r30]=r19,16		// length
@@ -407,6 +428,23 @@ exception_save_restart:
 	movl		gp=__gp
 	;;
 }
+
+	/* PTC.G leave non-exclusive */
+	srlz.d
+	movl	r25 = pmap_ptc_g_sem
+	;;
+.ptc_g_1:
+	ld8.acq r26 = [r25]
+	;;
+	mov	ar.ccv = r26
+	adds	r27 = -1, r26
+	;;
+	cmpxchg8.rel	r27 = [r25], r27, ar.ccv
+	;;
+	cmp.ne	p12, p0 = r26, r27
+(p12)	br.cond.spnt.few .ptc_g_1
+	;;
+
 {	.mib
 	srlz.d
 	nop		0

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Sat Jun 12 00:28:53 2010	(r209084)
+++ head/sys/ia64/ia64/pmap.c	Sat Jun 12 01:45:29 2010	(r209085)
@@ -182,7 +182,8 @@ static uint64_t pmap_ptc_e_count1 = 3;
 static uint64_t pmap_ptc_e_count2 = 2;
 static uint64_t pmap_ptc_e_stride1 = 0x2000;
 static uint64_t pmap_ptc_e_stride2 = 0x100000000;
-struct mtx pmap_ptcmutex;
+
+volatile u_long pmap_ptc_g_sem;
 
 /*
  * Data for the RID allocator
@@ -340,7 +341,6 @@ pmap_bootstrap()
 		       pmap_ptc_e_count2,
 		       pmap_ptc_e_stride1,
 		       pmap_ptc_e_stride2);
-	mtx_init(&pmap_ptcmutex, "Global PTC lock", NULL, MTX_SPIN);
 
 	/*
 	 * Setup RIDs. RIDs 0..7 are reserved for the kernel.
@@ -540,7 +540,8 @@ pmap_invalidate_page(vm_offset_t va)
 {
 	struct ia64_lpte *pte;
 	struct pcpu *pc;
-	uint64_t tag;
+	uint64_t tag, sem;
+	register_t is;
 	u_int vhpt_ofs;
 
 	critical_enter();
@@ -550,10 +551,32 @@ pmap_invalidate_page(vm_offset_t va)
 		pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
 		atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
 	}
-	critical_exit();
-	mtx_lock_spin(&pmap_ptcmutex);
+
+	/* PTC.G enter exclusive */
+	is = intr_disable();
+
+	/* Atomically assert writer after all writers have gone. */
+	do {
+		/* Wait until there's no more writer. */
+		do {
+			sem = atomic_load_acq_long(&pmap_ptc_g_sem);
+			tag = sem | (1ul << 63);
+		} while (sem == tag);
+	} while (!atomic_cmpset_rel_long(&pmap_ptc_g_sem, sem, tag));
+
+	/* Wait until all readers are gone. */
+	tag = (1ul << 63);
+	do {
+		sem = atomic_load_acq_long(&pmap_ptc_g_sem);
+	} while (sem != tag);
+
 	ia64_ptc_ga(va, PAGE_SHIFT << 2);
-	mtx_unlock_spin(&pmap_ptcmutex);
+
+	/* PTC.G leave exclusive */
+	atomic_store_rel_long(&pmap_ptc_g_sem, 0);
+
+	intr_restore(is);
+	critical_exit();
 }
 
 static void



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201006120145.o5C1jT9R052484>