Date:      Mon, 3 Nov 2014 13:14:34 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r274048 - head/sys/sys
Message-ID:  <201411031314.sA3DEYX7096436@svn.freebsd.org>

Author: mjg
Date: Mon Nov  3 13:14:34 2014
New Revision: 274048
URL: https://svnweb.freebsd.org/changeset/base/274048

Log:
  Fix misplaced read memory barrier in seq.
  
  The impact on races against capability checks was small: the bug could
  produce a spurious ENOTCAPABLE (early return), but it could not be used
  to bypass the checks.
  
  Tidy up some comments.
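
For context, seq.h implements the reader side of a sequence-counter
protocol: a reader snapshots the counter, reads the protected data, and
then re-checks the counter to detect a concurrent writer. A minimal
sketch of such a consumer follows; the names struct obj and obj_read are
hypothetical, not code from this commit:

	/* Sketch only; assumes kernel context and <sys/seq.h>. */
	struct obj {
		seq_t	seq;
		u_int	data;		/* protected by the counter */
	};

	static u_int
	obj_read(struct obj *o)
	{
		seq_t seq;
		u_int data;

		for (;;) {
			seq = seq_read(&o->seq);	/* waits out writers */
			data = o->data;			/* read protected data */
			if (seq_consistent(&o->seq, seq))
				return (data);		/* snapshot was stable */
			cpu_spinwait();			/* writer raced us; retry */
		}
	}

The misplaced barrier was in seq_consistent(): its read barrier fell
after the counter load instead of before it, so the data read above was
not guaranteed to complete before the re-check. Per the log, in the
capability code this could only surface as a spurious ENOTCAPABLE (early
return), never as a bypassed check.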

Modified:
  head/sys/sys/seq.h

Modified: head/sys/sys/seq.h
==============================================================================
--- head/sys/sys/seq.h	Mon Nov  3 13:02:58 2014	(r274047)
+++ head/sys/sys/seq.h	Mon Nov  3 13:14:34 2014	(r274048)
@@ -70,16 +70,16 @@ typedef uint32_t seq_t;
 #include <machine/cpu.h>
 
 /*
- * This is a temporary hack until memory barriers are cleaned up.
+ * Stuff below is going away when we gain suitable memory barriers.
  *
  * atomic_load_acq_int at least on amd64 provides a full memory barrier,
- * in a way which affects perforance.
+ * in a way which affects performance.
  *
  * Hack below covers all architectures and avoids most of the penalty at least
- * on amd64.
+ * on amd64 but still has unnecessary cost.
  */
 static __inline int
-atomic_load_acq_rmb_int(volatile u_int *p)
+atomic_load_rmb_int(volatile u_int *p)
 {
 	volatile u_int v;
 
@@ -88,6 +88,16 @@ atomic_load_acq_rmb_int(volatile u_int *
 	return (v);
 }
 
+static __inline int
+atomic_rmb_load_int(volatile u_int *p)
+{
+	volatile u_int v = 0;
+
+	atomic_load_acq_int(&v);
+	v = *p;
+	return (v);
+}
+
 static __inline bool
 seq_in_modify(seq_t seqp)
 {
@@ -117,7 +127,7 @@ seq_read(seq_t *seqp)
 	seq_t ret;
 
 	for (;;) {
-		ret = atomic_load_acq_rmb_int(seqp);
+		ret = atomic_load_rmb_int(seqp);
 		if (seq_in_modify(ret)) {
 			cpu_spinwait();
 			continue;
@@ -132,7 +142,7 @@ static __inline seq_t
 seq_consistent(seq_t *seqp, seq_t oldseq)
 {
 
-	return (atomic_load_acq_rmb_int(seqp) == oldseq);
+	return (atomic_rmb_load_int(seqp) == oldseq);
 }
 
 static __inline seq_t
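
The difference between the two helpers is which side of the counter load
the barrier falls on. A sketch of the intended orderings, assuming rmb()
stands for the machine's read memory barrier:

	/* atomic_load_rmb_int(), used by seq_read(): */
	v = *p;		/* load the counter first... */
	rmb();		/* ...then fence, so later data reads stay after */

	/* atomic_rmb_load_int(), used by seq_consistent(): */
	rmb();		/* earlier data reads must complete first... */
	v = *p;		/* ...then re-load the counter */

The committed helpers obtain the barrier indirectly, via
atomic_load_acq_int() on a local dummy, to avoid the full-barrier cost
described in the comment above; the sketch shows only the ordering each
one is meant to provide.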


