Date:      Wed, 31 Oct 2012 13:38:56 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r242395 - in head/sys: kern sys
Message-ID:  <201210311338.q9VDcuCE052876@svn.freebsd.org>

Author: attilio
Date: Wed Oct 31 13:38:56 2012
New Revision: 242395
URL: http://svn.freebsd.org/changeset/base/242395

Log:
  Give mtx(9) the ability to crunch different types of structures, with the
  only constraint that they have a lock cookie named mtx_lock.
  This name thus becomes reserved for structs that want to use the
  mtx(9) KPI; other locking primitives cannot reuse it for their
  members.
  
  Namely, such structs are the current struct mtx and the new
  struct mtx_padalign.  The new structure defines an object with the
  same layout as a struct mtx, but one that is allocated in areas
  aligned to the cache line size and is as big as a cache line.
  
  This is expected to give higher performance for highly contended
  mutexes, both spin and sleep (the latter because of adaptive
  spinning), where cache line contention results in too much traffic
  on the system bus.
  
  The struct mtx_padalign can be used in a completely transparent way
  with the mtx(9) KPI (a short usage sketch follows the log).
  
  For now, any MFC of this patch should be carefully evaluated,
  because it breaks the low-level KPI (though not its
  representation).
  
  Discussed with:	jhb
  Reviewed by:	jeff, andre
  Reviewed by:	mdf (earlier version)
  Tested by:	jimharris
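
  A minimal usage sketch of the transparency claim above (the foo_softc
  and sc_* names are invented for illustration); switching sc_mtx between
  struct mtx and struct mtx_padalign requires no change to any call site:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	struct foo_softc {
		struct mtx_padalign	sc_mtx;	/* first member: no leading pad */
		int			sc_count;
	};

	static void
	foo_attach(struct foo_softc *sc)
	{

		mtx_init(&sc->sc_mtx, "foo softc lock", NULL, MTX_DEF);
	}

	static void
	foo_bump(struct foo_softc *sc)
	{

		mtx_lock(&sc->sc_mtx);		/* same KPI as struct mtx */
		sc->sc_count++;
		mtx_unlock(&sc->sc_mtx);
	}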

Modified:
  head/sys/kern/kern_mutex.c
  head/sys/sys/_mutex.h
  head/sys/sys/mutex.h

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c	Wed Oct 31 08:25:45 2012	(r242394)
+++ head/sys/kern/kern_mutex.c	Wed Oct 31 13:38:56 2012	(r242395)
@@ -83,6 +83,12 @@ PMC_SOFT_DEFINE( , , lock, failed);
 #endif
 
 /*
+ * Return the mutex address when the lock cookie address is provided.
+ * This functionality assumes that struct mtx* types have a mtx_lock member.
+ */
+#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
+
+/*
  * Internal utility macros.
  */
 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
@@ -195,11 +201,15 @@ owner_mtx(const struct lock_object *lock
  * modules and can also be called from assembly language if needed.
  */
 void
-_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));
@@ -219,11 +229,15 @@ _mtx_lock_flags(struct mtx *m, int opts,
 }
 
 void
-_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -241,11 +255,16 @@ _mtx_unlock_flags(struct mtx *m, int opt
 }
 
 void
-_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+    int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -264,11 +283,16 @@ _mtx_lock_spin_flags(struct mtx *m, int 
 }
 
 void
-_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+    int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -288,8 +312,9 @@ _mtx_unlock_spin_flags(struct mtx *m, in
  * is already owned, it will recursively acquire the lock.
  */
 int
-mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
+_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -299,6 +324,8 @@ mtx_trylock_flags_(struct mtx *m, int op
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	m = mtxlock2mtx(c);
+
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));
@@ -330,15 +357,16 @@ mtx_trylock_flags_(struct mtx *m, int op
 }
 
 /*
- * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
  *
  * We call this if the lock is either contested (i.e. we need to go to
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
-    int line)
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+    const char *file, int line)
 {
+	struct mtx *m;
 	struct turnstile *ts;
 	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
@@ -360,6 +388,8 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -518,15 +548,16 @@ _mtx_lock_spin_failed(struct mtx *m)
 
 #ifdef SMP
 /*
- * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
  *
  * This is only called if we need to actually spin for the lock. Recursion
  * is handled inline.
  */
 void
-_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
-    int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
+    const char *file, int line)
 {
+	struct mtx *m;
 	int i = 0;
 #ifdef LOCK_PROFILING
 	int contested = 0;
@@ -536,6 +567,8 @@ _mtx_lock_spin(struct mtx *m, uintptr_t 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -684,19 +717,22 @@ thread_lock_set(struct thread *td, struc
 }
 
 /*
- * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
  *
  * We are only called here if the lock is recursed or contested (i.e. we
  * need to wake up a blocked thread).
  */
 void
-_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 	struct turnstile *ts;
 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (mtx_recursed(m)) {
 		if (--(m->mtx_recurse) == 0)
 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
@@ -735,11 +771,15 @@ _mtx_unlock_sleep(struct mtx *m, int opt
  */
 #ifdef INVARIANT_SUPPORT
 void
-_mtx_assert(const struct mtx *m, int what, const char *file, int line)
+__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
 {
+	const struct mtx *m;
 
 	if (panicstr != NULL || dumping)
 		return;
+
+	m = mtxlock2mtx(c);
+
 	switch (what) {
 	case MA_OWNED:
 	case MA_OWNED | MA_RECURSED:
@@ -806,7 +846,8 @@ mtx_sysinit(void *arg)
 {
 	struct mtx_args *margs = arg;
 
-	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
+	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
+	    margs->ma_opts);
 }
 
 /*
@@ -816,11 +857,14 @@ mtx_sysinit(void *arg)
  * witness.
  */
 void
-mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
 {
+	struct mtx *m;
 	struct lock_class *class;
 	int flags;
 
+	m = mtxlock2mtx(c);
+
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 		MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
 	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
@@ -863,8 +907,11 @@ mtx_init(struct mtx *m, const char *name
  * flags.
  */
 void
-mtx_destroy(struct mtx *m)
+_mtx_destroy(volatile uintptr_t *c)
 {
+	struct mtx *m;
+
+	m = mtxlock2mtx(c);
 
 	if (!mtx_owned(m))
 		MPASS(mtx_unowned(m));
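
The mtxlock2mtx() conversion above is the classic container-of idiom: given
the address of the mtx_lock cookie, subtract the member offset to recover
the enclosing mutex.  A self-contained sketch of what it computes, with
my_containerof() standing in for the real __containerof() from
<sys/cdefs.h> and struct mtx_like standing in for struct mtx:

	#include <stddef.h>
	#include <stdint.h>

	/* Simplified stand-in for __containerof() from <sys/cdefs.h>. */
	#define	my_containerof(x, s, m)					\
		((s *)((uintptr_t)(x) - offsetof(s, m)))

	struct mtx_like {
		void			*lock_object;	/* placeholder */
		volatile uintptr_t	mtx_lock;	/* lock cookie */
	};

	static struct mtx_like *
	cookie2mtx(volatile uintptr_t *c)
	{

		return (my_containerof(c, struct mtx_like, mtx_lock));
	}

For any struct mtx_like m, cookie2mtx(&m.mtx_lock) yields &m again, which
is what lets the KPI accept only the cookie address yet operate on the
whole lock.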

Modified: head/sys/sys/_mutex.h
==============================================================================
--- head/sys/sys/_mutex.h	Wed Oct 31 08:25:45 2012	(r242394)
+++ head/sys/sys/_mutex.h	Wed Oct 31 13:38:56 2012	(r242395)
@@ -31,12 +31,34 @@
 #ifndef _SYS__MUTEX_H_
 #define	_SYS__MUTEX_H_
 
+#include <machine/param.h>
+
 /*
  * Sleep/spin mutex.
+ *
+ * The layout of the first 2 members of struct mtx* is considered fixed.
+ * More specifically, it is assumed that there is a member called mtx_lock
+ * for every struct mtx* and that other locking primitive structures are
+ * not allowed to use that name for their members.
+ * If this needs to change, the relevant bits of the mutex implementation
+ * must be modified accordingly.
  */
 struct mtx {
 	struct lock_object	lock_object;	/* Common lock properties. */
 	volatile uintptr_t	mtx_lock;	/* Owner and flags. */
 };
 
+/*
+ * Members of struct mtx_padalign must mirror members of struct mtx.
+ * mtx_padalign mutexes can use the mtx(9) KPI transparently, without changes.
+ * When using pad-aligned mutexes within structures, they should generally
+ * stay as the first member of the struct.  This is because otherwise the
+ * compiler can generate even more padding for the struct to keep a correct
+ * alignment for the mutex.
+ */
+struct mtx_padalign {
+	struct lock_object	lock_object;	/* Common lock properties. */
+	volatile uintptr_t	mtx_lock;	/* Owner and flags. */
+} __aligned(CACHE_LINE_SIZE);
+
 #endif /* !_SYS__MUTEX_H_ */
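
The layout advice in the comment above can be made concrete.  Assuming a
64-byte CACHE_LINE_SIZE and that struct mtx_padalign rounds up to exactly
one cache line (both values are illustrative, not guaranteed):

	struct not_first {
		int			a;	/* offset 0, then 60 bytes of pad */
		struct mtx_padalign	m;	/* offset 64 */
		int			b;	/* offset 128, then pad to 192 */
	};					/* sizeof == 192 */

	struct first {
		struct mtx_padalign	m;	/* offset 0 */
		int			a;	/* offset 64 */
		int			b;	/* offset 68, then pad to 128 */
	};					/* sizeof == 128 */

Placing the pad-aligned mutex first eliminates the hole in front of it,
saving a full cache line in this example.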

Modified: head/sys/sys/mutex.h
==============================================================================
--- head/sys/sys/mutex.h	Wed Oct 31 08:25:45 2012	(r242394)
+++ head/sys/sys/mutex.h	Wed Oct 31 13:38:56 2012	(r242395)
@@ -79,7 +79,8 @@
  *
  * NOTE: Functions prepended with `_' (underscore) are exported to other parts
  *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
- *	 and LOCK_LINE. These functions should not be called directly by any
+ *	 and LOCK_LINE or for hiding the lock cookie crunching from the
+ *	 consumers. These functions should not be called directly by any
  *	 code using the API. Their macros cover their functionality.
  *	 Functions with a `_' suffix are the entrypoint for the common
  *	 KPI covering both compat shims and fast path case.  These can be
@@ -89,28 +90,32 @@
  * [See below for descriptions]
  *
  */
-void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
-void	mtx_destroy(struct mtx *m);
+void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
+	    int opts);
+void	_mtx_destroy(volatile uintptr_t *c);
 void	mtx_sysinit(void *arg);
-int	mtx_trylock_flags_(struct mtx *m, int opts, const char *file,
+int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 void	mutex_init(void);
-void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 	    const char *file, int line);
-void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
 #ifdef SMP
-void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 	    const char *file, int line);
 #endif
-void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
-	     int line);
-void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
+void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
 	     int line);
+void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
+	    const char *file, int line);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void	_mtx_assert(const struct mtx *m, int what, const char *file, int line);
+void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
+	    int line);
 #endif
 void	thread_lock_flags_(struct thread *, int, const char *, int);
 
@@ -121,6 +126,38 @@ void	thread_lock_flags_(struct thread *,
 #define	thread_unlock(tdp)						\
        mtx_unlock_spin((tdp)->td_lock)
 
+/*
+ * Top-level macros to provide the lock cookie once the actual mtx is passed.
+ * They also prevent passing a malformed object to the mtx KPI by
+ * making compilation fail.
+ */
+#define	mtx_init(m, n, t, o)						\
+	_mtx_init(&(m)->mtx_lock, n, t, o)
+#define	mtx_destroy(m)							\
+	_mtx_destroy(&(m)->mtx_lock)
+#define	mtx_trylock_flags_(m, o, f, l)					\
+	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
+#define	_mtx_lock_sleep(m, t, o, f, l)					\
+	__mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l)
+#define	_mtx_unlock_sleep(m, o, f, l)					\
+	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
+#ifdef SMP
+#define	_mtx_lock_spin(m, t, o, f, l)					\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l)
+#endif
+#define	_mtx_lock_flags(m, o, f, l)					\
+	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_unlock_flags(m, o, f, l)					\
+	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_lock_spin_flags(m, o, f, l)				\
+	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_unlock_spin_flags(m, o, f, l)				\
+	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define	_mtx_assert(m, w, f, l)						\
+	__mtx_assert(&(m)->mtx_lock, w, f, l)
+#endif
+
 #define	mtx_recurse	lock_object.lo_data
 
 /* Very simple operations on mtx_lock. */
@@ -395,7 +432,7 @@ do {									\
 } while (0)
 
 struct mtx_args {
-	struct mtx	*ma_mtx;
+	void		*ma_mtx;
 	const char 	*ma_desc;
 	int		 ma_opts;
 };
@@ -409,7 +446,7 @@ struct mtx_args {
 	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
 	    mtx_sysinit, &name##_args);					\
 	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
-	    mtx_destroy, (mtx))
+	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
 
 /*
  * The INVARIANTS-enabled mtx_assert() functionality.
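
The compile-time check mentioned in the comment on the top-level macros can
be seen with a short, illustrative example (struct sx is simply any lock
type that lacks an mtx_lock member):

	struct mtx		plain_m;
	struct mtx_padalign	padded_m;
	struct sx		other_lock;	/* no mtx_lock member */

	mtx_init(&plain_m, "plain", NULL, MTX_DEF);	/* compiles */
	mtx_init(&padded_m, "padded", NULL, MTX_DEF);	/* also compiles */
	/*
	 * mtx_init(&other_lock, "bogus", NULL, MTX_DEF) fails to compile:
	 * the macro expands to _mtx_init(&(&other_lock)->mtx_lock, ...)
	 * and struct sx has no member named mtx_lock.
	 */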


