Date:      Sat, 31 Dec 2016 16:37:47 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r310979 - in stable/10/sys: kern sys
Message-ID:  <201612311637.uBVGblbG081502@repo.freebsd.org>

Author: mjg
Date: Sat Dec 31 16:37:47 2016
New Revision: 310979
URL: https://svnweb.freebsd.org/changeset/base/310979

Log:
  MFC r301157:
  
      Microoptimize locking primitives by avoiding unnecessary atomic ops.
  
      The inline versions of the primitives do an atomic op and, if it fails, fall
      back to the actual primitives, which immediately retry the atomic op.
  
      The obvious optimization is to check whether the lock is free and only then
      attempt the atomic op.

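  The change applies the classic test-and-test-and-set pattern: a plain read of
  the lock word filters out the case where the lock is visibly held, so the
  atomic op (which pulls the cache line into exclusive state even when it fails)
  is only attempted when it is likely to succeed. Below is a minimal userland
  sketch of the idea using C11 atomics rather than the kernel's primitives; all
  names in it are illustrative, not the kernel API.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Lock word: 0 means unlocked, otherwise the owner's tid. */
	static _Atomic uintptr_t lk_lock;

	static bool
	lk_try_lock(uintptr_t tid)
	{
		uintptr_t v = 0;

		/* Single CAS attempt: acquire semantics on success. */
		return (atomic_compare_exchange_strong_explicit(&lk_lock,
		    &v, tid, memory_order_acquire, memory_order_relaxed));
	}

	static void
	lk_lock_slow(uintptr_t tid)
	{
		for (;;) {
			/*
			 * Plain load first: a failed CAS still brings the
			 * cache line in exclusive state, so while the lock
			 * is visibly held we spin on a read instead of
			 * bouncing the line between CPUs.
			 */
			if (atomic_load_explicit(&lk_lock,
			    memory_order_relaxed) == 0 && lk_try_lock(tid))
				break;
			/* spin-wait, back off, or block here */
		}
	}

  The same shape appears in each hunk below: the inline macros gain a pre-check
  so the slow path is entered without a wasted atomic, and the slow-path loops
  re-check the lock word before every retry.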
Modified:
  stable/10/sys/kern/kern_lock.c
  stable/10/sys/kern/kern_mutex.c
  stable/10/sys/kern/kern_rwlock.c
  stable/10/sys/kern/kern_sx.c
  stable/10/sys/sys/mutex.h
  stable/10/sys/sys/rwlock.h
  stable/10/sys/sys/sx.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/kern/kern_lock.c
==============================================================================
--- stable/10/sys/kern/kern_lock.c	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/kern/kern_lock.c	Sat Dec 31 16:37:47 2016	(r310979)
@@ -792,8 +792,10 @@ __lockmgr_args(struct lock *lk, u_int fl
 			break;
 		}
 
-		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
-		    tid)) {
+		for (;;) {
+			if (lk->lk_lock == LK_UNLOCKED &&
+			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+				break;
 #ifdef HWPMC_HOOKS
 			PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -1129,7 +1131,11 @@ __lockmgr_args(struct lock *lk, u_int fl
 			    __func__, iwmesg, file, line);
 		}
 
-		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+		for (;;) {
+			if (lk->lk_lock == LK_UNLOCKED &&
+			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+				break;
+
 #ifdef HWPMC_HOOKS
 			PMC_SOFT_CALL( , , lock, failed);
 #endif

Modified: stable/10/sys/kern/kern_mutex.c
==============================================================================
--- stable/10/sys/kern/kern_mutex.c	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/kern/kern_mutex.c	Sat Dec 31 16:37:47 2016	(r310979)
@@ -451,7 +451,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 	all_time -= lockstat_nsecs(&m->lock_object);
 #endif
 
-	while (!_mtx_obtain_lock(m, tid)) {
+	for (;;) {
+		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+			break;
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
 #endif
@@ -634,8 +636,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 #ifdef KDTRACE_HOOKS
 	spin_time -= lockstat_nsecs(&m->lock_object);
 #endif
-	while (!_mtx_obtain_lock(m, tid)) {
-
+	for (;;) {
+		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+			break;
 		/* Give interrupts a chance while we spin. */
 		spinlock_exit();
 		while (m->mtx_lock != MTX_UNOWNED) {
@@ -714,7 +717,9 @@ retry:
 			    m->lock_object.lo_name, file, line));
 		WITNESS_CHECKORDER(&m->lock_object,
 		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
-		while (!_mtx_obtain_lock(m, tid)) {
+		for (;;) {
+			if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+				break;
 			if (m->mtx_lock == tid) {
 				m->mtx_recurse++;
 				break;

Modified: stable/10/sys/kern/kern_rwlock.c
==============================================================================
--- stable/10/sys/kern/kern_rwlock.c	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/kern/kern_rwlock.c	Sat Dec 31 16:37:47 2016	(r310979)
@@ -768,7 +768,9 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 	all_time -= lockstat_nsecs(&rw->lock_object);
 	state = rw->rw_lock;
 #endif
-	while (!_rw_write_lock(rw, tid)) {
+	for (;;) {
+		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
+			break;
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
 #endif

Modified: stable/10/sys/kern/kern_sx.c
==============================================================================
--- stable/10/sys/kern/kern_sx.c	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/kern/kern_sx.c	Sat Dec 31 16:37:47 2016	(r310979)
@@ -547,7 +547,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 	all_time -= lockstat_nsecs(&sx->lock_object);
 	state = sx->sx_lock;
 #endif
-	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
+	for (;;) {
+		if (sx->sx_lock == SX_LOCK_UNLOCKED &&
+		    atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+			break;
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
 #endif

Modified: stable/10/sys/sys/mutex.h
==============================================================================
--- stable/10/sys/sys/mutex.h	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/sys/mutex.h	Sat Dec 31 16:37:47 2016	(r310979)
@@ -188,7 +188,7 @@ void	thread_lock_flags_(struct thread *,
 #define __mtx_lock(mp, tid, opts, file, line) do {			\
 	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
-	if (!_mtx_obtain_lock((mp), _tid))				\
+	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid)))\
 		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
 	else								\
               	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
@@ -206,7 +206,7 @@ void	thread_lock_flags_(struct thread *,
 	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
 	spinlock_enter();						\
-	if (!_mtx_obtain_lock((mp), _tid)) {				\
+	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
 		if ((mp)->mtx_lock == _tid)				\
 			(mp)->mtx_recurse++;				\
 		else							\
@@ -262,7 +262,7 @@ void	thread_lock_flags_(struct thread *,
 #define __mtx_unlock(mp, tid, opts, file, line) do {			\
 	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
-	if (!_mtx_release_lock((mp), _tid))				\
+	if ((mp)->mtx_lock != _tid || !_mtx_release_lock((mp), _tid))	\
 		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
 } while (0)
 

Modified: stable/10/sys/sys/rwlock.h
==============================================================================
--- stable/10/sys/sys/rwlock.h	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/sys/rwlock.h	Sat Dec 31 16:37:47 2016	(r310979)
@@ -96,7 +96,7 @@
 #define	__rw_wlock(rw, tid, file, line) do {				\
 	uintptr_t _tid = (uintptr_t)(tid);				\
 						                        \
-	if (!_rw_write_lock((rw), _tid))				\
+	if ((rw)->rw_lock != RW_UNLOCKED || !_rw_write_lock((rw), _tid))\
 		_rw_wlock_hard((rw), _tid, (file), (line));		\
 	else 								\
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, \
@@ -109,7 +109,7 @@
 									\
 	if ((rw)->rw_recurse)						\
 		(rw)->rw_recurse--;					\
-	else if (!_rw_write_unlock((rw), _tid))				\
+	else if ((rw)->rw_lock != _tid || !_rw_write_unlock((rw), _tid))\
 		_rw_wunlock_hard((rw), _tid, (file), (line));		\
 } while (0)
 

Modified: stable/10/sys/sys/sx.h
==============================================================================
--- stable/10/sys/sys/sx.h	Sat Dec 31 16:02:27 2016	(r310978)
+++ stable/10/sys/sys/sx.h	Sat Dec 31 16:37:47 2016	(r310979)
@@ -148,7 +148,8 @@ __sx_xlock(struct sx *sx, struct thread 
 	uintptr_t tid = (uintptr_t)td;
 	int error = 0;
 
-	if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+	if (sx->sx_lock != SX_LOCK_UNLOCKED ||
+	    !atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
 		error = _sx_xlock_hard(sx, tid, opts, file, line);
 	else 
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
@@ -163,7 +164,8 @@ __sx_xunlock(struct sx *sx, struct threa
 {
 	uintptr_t tid = (uintptr_t)td;
 
-	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+	if (sx->sx_lock != tid ||
+	    !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
 		_sx_xunlock_hard(sx, tid, file, line);
 }
 


