Date:      Fri, 4 May 2012 16:00:24 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r235013 - in head/sys/powerpc: aim include
Message-ID:  <201205041600.q44G0O1S011532@svn.freebsd.org>

Author: nwhitehorn
Date: Fri May  4 16:00:22 2012
New Revision: 235013
URL: http://svn.freebsd.org/changeset/base/235013

Log:
  Fix final bugs in memory barriers on PowerPC:
  - Use isync/lwsync unconditionally for acquire/release. Use of isync
    guarantees a complete memory barrier, which is important for serialization
    of bus space accesses with mutexes on multi-processor systems.
  - Go back to using sync as the I/O memory barrier, which solves the same
    problem as above with respect to mutex release using lwsync, while not
    penalizing non-I/O operations the way a return to sync for the atomic
    release operations would.
  - Place an acquisition barrier around thread lock acquisition in
    cpu_switchin().
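
A minimal sketch of the lock-acquire/release idiom these barriers implement
(toy_lock()/toy_unlock() are hypothetical names for illustration, not the
kernel's code; the real sequences live in atomic.h and the swtch*.S files
changed below):

#include <stdint.h>

static volatile uint32_t lock_word;	/* 0 = free, 1 = held */

static void
toy_lock(void)
{
	uint32_t tmp;

	/*
	 * Spin with lwarx/stwcx. until the lock word goes 0 -> 1, then
	 * isync so that no later access is performed before the lock is
	 * observed held (the acquire side; cf. __ATOMIC_ACQ() below).
	 */
	__asm __volatile(
	    "1: lwarx   %0,0,%1\n\t"	/* load-and-reserve the lock word */
	    "   cmpwi   %0,0\n\t"	/* already held? */
	    "   bne-    1b\n\t"		/* yes: spin */
	    "   stwcx.  %2,0,%1\n\t"	/* try to claim it */
	    "   bne-    1b\n\t"		/* lost the reservation: retry */
	    "   isync"			/* acquire barrier */
	    : "=&r"(tmp)
	    : "b"(&lock_word), "r"(1)
	    : "cr0", "memory");
}

static void
toy_unlock(void)
{
	/* Release: all prior accesses complete before the lock is freed. */
	__asm __volatile("lwsync" : : : "memory");
	lock_word = 0;
}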

Modified:
  head/sys/powerpc/aim/swtch32.S
  head/sys/powerpc/aim/swtch64.S
  head/sys/powerpc/include/atomic.h
  head/sys/powerpc/include/pio.h

Modified: head/sys/powerpc/aim/swtch32.S
==============================================================================
--- head/sys/powerpc/aim/swtch32.S	Fri May  4 15:57:05 2012	(r235012)
+++ head/sys/powerpc/aim/swtch32.S	Fri May  4 16:00:22 2012	(r235013)
@@ -124,7 +124,8 @@ cpu_switchin:
 blocked_loop:
 	lwz	%r7,TD_LOCK(%r2)
 	cmpw	%r6,%r7 
-	beq	blocked_loop
+	beq-	blocked_loop
+	isync
 #endif
 
 	mfsprg	%r7,0			/* Get the pcpu pointer */

Modified: head/sys/powerpc/aim/swtch64.S
==============================================================================
--- head/sys/powerpc/aim/swtch64.S	Fri May  4 15:57:05 2012	(r235012)
+++ head/sys/powerpc/aim/swtch64.S	Fri May  4 16:00:22 2012	(r235013)
@@ -150,7 +150,8 @@ cpu_switchin:
 blocked_loop:
 	ld	%r7,TD_LOCK(%r13)
 	cmpd	%r6,%r7 
-	beq	blocked_loop
+	beq-	blocked_loop
+	isync
 #endif
 
 	mfsprg	%r7,0			/* Get the pcpu pointer */
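
The hunks above spin until the incoming thread's lock has been handed off and
then execute isync, so that no later load of the thread's state is performed
before the lock value has been observed. A minimal C rendering of the same
32-bit sequence (wait_for_handoff() is a hypothetical name, not the file's
code; the 64-bit file uses ld/cmpd instead of lwz/cmpw):

#include <stdint.h>

static uint32_t
wait_for_handoff(volatile uint32_t *td_lock, uint32_t blocked_lock)
{
	uint32_t cur;

	__asm __volatile(
	    "1: lwz     %0,0(%1)\n\t"	/* load TD_LOCK */
	    "   cmpw    %2,%0\n\t"	/* still the blocked lock? */
	    "   beq-    1b\n\t"		/* yes: keep spinning */
	    "   isync"			/* acquire barrier for later loads */
	    : "=&r"(cur)
	    : "b"(td_lock), "r"(blocked_lock)
	    : "cr0", "memory");
	return (cur);
}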

Modified: head/sys/powerpc/include/atomic.h
==============================================================================
--- head/sys/powerpc/include/atomic.h	Fri May  4 15:57:05 2012	(r235012)
+++ head/sys/powerpc/include/atomic.h	Fri May  4 16:00:22 2012	(r235013)
@@ -51,13 +51,8 @@
  * with the atomic lXarx/stXcx. sequences below. See Appendix B.2 of Book II
  * of the architecture manual.
  */
-#ifdef __powerpc64__
-#define __ATOMIC_REL()	__asm __volatile("lwsync" : : : "memory")
-#define __ATOMIC_ACQ()	__asm __volatile("lwsync" : : : "memory")
-#else
 #define __ATOMIC_REL()	__asm __volatile("lwsync" : : : "memory")
 #define __ATOMIC_ACQ()	__asm __volatile("isync" : : : "memory")
-#endif
 
 /*
  * atomic_add(p, v)

Modified: head/sys/powerpc/include/pio.h
==============================================================================
--- head/sys/powerpc/include/pio.h	Fri May  4 15:57:05 2012	(r235012)
+++ head/sys/powerpc/include/pio.h	Fri May  4 16:00:22 2012	(r235013)
@@ -39,7 +39,12 @@
  * I/O macros.
  */
 
-#define powerpc_iomb() __asm __volatile("eieio" : : : "memory")
+/*
+ * Use sync so that bus space operations cannot sneak out the bottom of
+ * mutex-protected sections (mutex release does not guarantee completion of
+ * accesses to caching-inhibited memory on some systems)
+ */
+#define powerpc_iomb() __asm __volatile("sync" : : : "memory")
 
 static __inline void
 __outb(volatile u_int8_t *a, u_int8_t v)
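
The new comment explains the choice of barrier: lwsync in mutex release
orders ordinary cacheable accesses, but, as the comment notes, release does
not guarantee completion of accesses to caching-inhibited memory, so a device
store could still be outstanding when the lock is dropped. A minimal sketch
of the resulting pattern (toy_reg_write8() is a hypothetical helper modeled
on __outb(), not pio.h's code):

#include <stdint.h>

static __inline void
toy_reg_write8(volatile uint8_t *reg, uint8_t v)
{
	*reg = v;	/* store to a caching-inhibited device register */
	/* Full sync: wait for the store to complete, so it cannot sneak
	 * out the bottom of a mutex-protected section. */
	__asm __volatile("sync" : : : "memory");
}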


