Date:      Sun, 12 Feb 2017 09:49:44 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r313683 - in head/sys: kern sys
Message-ID:  <201702120949.v1C9ni48080168@repo.freebsd.org>

Author: mjg
Date: Sun Feb 12 09:49:44 2017
New Revision: 313683
URL: https://svnweb.freebsd.org/changeset/base/313683

Log:
  lockmgr: implement fast path
  
  The main lockmgr routine takes 8 arguments, which makes it impossible
  for the intermediate vop_stdlock/unlock routines to tail-call it.
  
  The routine itself starts with an if-forest and reads the lock word
  several times.
  
  This slows things down in both single- and multi-threaded operation.
  With the patch, single-threaded fstat throughput goes up by 4% and
  multi-threaded by up to ~27%.
  
  Note that there is still a lot of room for improvement.
  
  Reviewed by:	kib
  Tested by:	pho
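
  To make the shape of the change concrete: a fast path keeps the
  argument list short enough to travel entirely in registers (so thin
  wrappers can tail-call it), attempts a single atomic operation for the
  uncontended case, and only falls back to the full slow-path routine on
  contention.  The sketch below is illustrative, not the committed code:
  the names and lock-word encoding are made up, and C11 atomics stand in
  for the kernel's atomic(9) primitives.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LK_UNLOCKED	((uintptr_t)0)	/* hypothetical "free" encoding */

/* Stand-in for the full 8-argument __lockmgr_args() slow path. */
static int
slow_lock(_Atomic uintptr_t *lkp, uintptr_t tid)
{
	uintptr_t exp;

	for (;;) {
		exp = LK_UNLOCKED;
		if (atomic_compare_exchange_weak_explicit(lkp, &exp, tid,
		    memory_order_acquire, memory_order_relaxed))
			return (0);
		/* The real routine would queue on the lock and sleep. */
	}
}

/*
 * Few arguments means they all fit in registers, so a wrapper ending in
 * "return (fast_lock(...))" can compile down to a plain jump (a tail
 * call).  One CAS covers the uncontended case; everything else defers
 * to the slow path.
 */
static int
fast_lock(_Atomic uintptr_t *lkp, uintptr_t tid)
{
	uintptr_t exp = LK_UNLOCKED;

	if (atomic_compare_exchange_strong_explicit(lkp, &exp, tid,
	    memory_order_acquire, memory_order_relaxed))
		return (0);
	return (slow_lock(lkp, tid));
}

int
main(void)
{
	_Atomic uintptr_t lk = LK_UNLOCKED;

	printf("locked: %d, owner: %#jx\n", fast_lock(&lk, 1),
	    (uintmax_t)atomic_load(&lk));
	return (0);
}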

Modified:
  head/sys/kern/kern_lock.c
  head/sys/kern/vfs_default.c
  head/sys/sys/lockmgr.h

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Sun Feb 12 09:27:34 2017	(r313682)
+++ head/sys/kern/kern_lock.c	Sun Feb 12 09:49:44 2017	(r313683)
@@ -168,6 +168,72 @@ SYSCTL_UINT(_debug_lockmgr, OID_AUTO, re
 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
 #endif
 
+static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
+    int flags);
+static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);
+
+static void
+lockmgr_note_shared_acquire(struct lock *lk, int contested,
+    uint64_t waittime, const char *file, int line, int flags)
+{
+
+	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
+	    file, line);
+	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
+	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
+	TD_LOCKS_INC(curthread);
+	TD_SLOCKS_INC(curthread);
+	STACK_SAVE(lk);
+}
+
+static void
+lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
+{
+
+	lock_profile_release_lock(&lk->lock_object);
+	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
+	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
+	TD_LOCKS_DEC(curthread);
+	TD_SLOCKS_DEC(curthread);
+}
+
+static void
+lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
+    uint64_t waittime, const char *file, int line, int flags)
+{
+
+	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
+	    file, line);
+	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
+	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
+	    line);
+	TD_LOCKS_INC(curthread);
+	STACK_SAVE(lk);
+}
+
+static void
+lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
+{
+
+	lock_profile_release_lock(&lk->lock_object);
+	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
+	    line);
+	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
+	TD_LOCKS_DEC(curthread);
+}
+
+static void
+lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
+    int flags)
+{
+
+	LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
+	    line);
+	WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
+	    LK_TRYWIT(flags), file, line);
+	TD_SLOCKS_DEC(curthread);
+}
+
 static __inline struct thread *
 lockmgr_xholder(const struct lock *lk)
 {
@@ -234,35 +300,11 @@ wakeupshlk(struct lock *lk, const char *
 	u_int realexslp;
 	int queue, wakeup_swapper;
 
-	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
-	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
-
 	wakeup_swapper = 0;
 	for (;;) {
 		x = lk->lk_lock;
-
-		/*
-		 * If there is more than one shared lock held, just drop one
-		 * and return.
-		 */
-		if (LK_SHARERS(x) > 1) {
-			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
-			    x - LK_ONE_SHARER))
-				break;
-			continue;
-		}
-
-		/*
-		 * If there are not waiters on the exclusive queue, drop the
-		 * lock quickly.
-		 */
-		if ((x & LK_ALL_WAITERS) == 0) {
-			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
-			    LK_SHARERS_LOCK(1));
-			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
-				break;
-			continue;
-		}
+		if (lockmgr_sunlock_try(lk, x))
+			break;
 
 		/*
 		 * We should have a sharer with waiters, so enter the hard
@@ -332,9 +374,7 @@ wakeupshlk(struct lock *lk, const char *
 		break;
 	}
 
-	lock_profile_release_lock(&lk->lock_object);
-	TD_LOCKS_DEC(curthread);
-	TD_SLOCKS_DEC(curthread);
+	lockmgr_note_shared_release(lk, file, line);
 	return (wakeup_swapper);
 }
 
@@ -448,6 +488,165 @@ lockdestroy(struct lock *lk)
 	lock_destroy(&lk->lock_object);
 }
 
+static bool __always_inline
+lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
+{
+
+	/*
+	 * If no other thread has an exclusive lock, or
+	 * no exclusive waiter is present, bump the count of
+	 * sharers.  Since we have to preserve the state of
+	 * waiters, if we fail to acquire the shared lock
+	 * loop back and retry.
+	 */
+	*xp = lk->lk_lock;
+	while (LK_CAN_SHARE(*xp, flags)) {
+		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
+		    *xp + LK_ONE_SHARER)) {
+			return (true);
+		}
+	}
+	return (false);
+}
+
+static bool __always_inline
+lockmgr_sunlock_try(struct lock *lk, uintptr_t x)
+{
+
+	for (;;) {
+		/*
+		 * If there is more than one shared lock held, just drop one
+		 * and return.
+		 */
+		if (LK_SHARERS(x) > 1) {
+			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
+			    x - LK_ONE_SHARER))
+				return (true);
+			continue;
+		}
+
+		/*
+		 * If there are no waiters on the exclusive queue, drop the
+		 * lock quickly.
+		 */
+		if ((x & LK_ALL_WAITERS) == 0) {
+			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
+			    LK_SHARERS_LOCK(1));
+			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
+			    LK_UNLOCKED))
+				return (true);
+			continue;
+		}
+		break;
+	}
+	return (false);
+}
+
+int
+lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
+    const char *file, int line)
+{
+	struct lock_class *class;
+	uintptr_t x, v, tid;
+	u_int op;
+	bool locked;
+
+	op = flags & LK_TYPE_MASK;
+	locked = false;
+	switch (op) {
+	case LK_SHARED:
+		if (LK_CAN_WITNESS(flags))
+			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
+			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
+		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
+			break;
+		if (lockmgr_slock_try(lk, &x, flags)) {
+			lockmgr_note_shared_acquire(lk, 0, 0,
+			    file, line, flags);
+			locked = true;
+		}
+		break;
+	case LK_EXCLUSIVE:
+		if (LK_CAN_WITNESS(flags))
+			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
+			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
+			    ilk : NULL);
+		tid = (uintptr_t)curthread;
+		if (lk->lk_lock == LK_UNLOCKED &&
+		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
+			    flags);
+			locked = true;
+		}
+		break;
+	case LK_UPGRADE:
+	case LK_TRYUPGRADE:
+		_lockmgr_assert(lk, KA_SLOCKED, file, line);
+		tid = (uintptr_t)curthread;
+		v = lk->lk_lock;
+		x = v & LK_ALL_WAITERS;
+		v &= LK_EXCLUSIVE_SPINNERS;
+		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
+		    tid | x)) {
+			lockmgr_note_exclusive_upgrade(lk, file, line, flags);
+			locked = true;
+		}
+		break;
+	default:
+		break;
+	}
+	if (__predict_true(locked)) {
+		if (__predict_false(flags & LK_INTERLOCK)) {
+			class = LOCK_CLASS(ilk);
+			class->lc_unlock(ilk);
+		}
+		return (0);
+	} else {
+		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
+		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
+	}
+}
+
+int
+lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
+{
+	struct lock_class *class;
+	uintptr_t x, tid;
+	bool unlocked;
+	const char *file;
+	int line;
+
+	file = __FILE__;
+	line = __LINE__;
+
+	_lockmgr_assert(lk, KA_LOCKED, file, line);
+	unlocked = false;
+	x = lk->lk_lock;
+	if (__predict_true((x & LK_SHARE) != 0)) {
+		if (lockmgr_sunlock_try(lk, x)) {
+			lockmgr_note_shared_release(lk, file, line);
+			unlocked = true;
+		}
+	} else {
+		tid = (uintptr_t)curthread;
+		if (!lockmgr_recursed(lk) &&
+		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
+			lockmgr_note_exclusive_release(lk, file, line);
+			unlocked = true;
+		}
+	}
+	if (__predict_true(unlocked)) {
+		if (__predict_false(flags & LK_INTERLOCK)) {
+			class = LOCK_CLASS(ilk);
+			class->lc_unlock(ilk);
+		}
+		return (0);
+	} else {
+		return (__lockmgr_args(lk, flags | LK_RELEASE, ilk, LK_WMESG_DEFAULT,
+		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE));
+	}
+}
+
 int
 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *wmesg, int pri, int timo, const char *file, int line)
@@ -518,21 +717,8 @@ __lockmgr_args(struct lock *lk, u_int fl
 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
 		for (;;) {
-			x = lk->lk_lock;
-
-			/*
-			 * If no other thread has an exclusive lock, or
-			 * no exclusive waiter is present, bump the count of
-			 * sharers.  Since we have to preserve the state of
-			 * waiters, if we fail to acquire the shared lock
-			 * loop back and retry.
-			 */
-			if (LK_CAN_SHARE(x, flags)) {
-				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
-				    x + LK_ONE_SHARER))
-					break;
-				continue;
-			}
+			if (lockmgr_slock_try(lk, &x, flags))
+				break;
 #ifdef HWPMC_HOOKS
 			PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -697,15 +883,13 @@ __lockmgr_args(struct lock *lk, u_int fl
 			    __func__, lk);
 		}
 		if (error == 0) {
-			lock_profile_obtain_lock_success(&lk->lock_object,
-			    contested, waittime, file, line);
-			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
-			    line);
-			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
-			    line);
-			TD_LOCKS_INC(curthread);
-			TD_SLOCKS_INC(curthread);
-			STACK_SAVE(lk);
+#ifdef LOCK_PROFILING
+			lockmgr_note_shared_acquire(lk, contested, waittime,
+			    file, line, flags);
+#else
+			lockmgr_note_shared_acquire(lk, 0, 0, file, line,
+			    flags);
+#endif
 		}
 		break;
 	case LK_UPGRADE:
@@ -968,14 +1152,13 @@ __lockmgr_args(struct lock *lk, u_int fl
 			    __func__, lk);
 		}
 		if (error == 0) {
-			lock_profile_obtain_lock_success(&lk->lock_object,
-			    contested, waittime, file, line);
-			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
-			    lk->lk_recurse, file, line);
-			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
-			    LK_TRYWIT(flags), file, line);
-			TD_LOCKS_INC(curthread);
-			STACK_SAVE(lk);
+#ifdef LOCK_PROFILING
+			lockmgr_note_exclusive_acquire(lk, contested, waittime,
+			    file, line, flags);
+#else
+			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
+			    flags);
+#endif
 		}
 		break;
 	case LK_DOWNGRADE:
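
A note on the conversions above: the retry loops switch from
atomic_cmpset to atomic_fcmpset.  On failure, fcmpset writes the value
it observed back through its "old" pointer, so the loop re-evaluates
against a fresh copy of the lock word without a separate read.  C11's
compare-exchange has the same update-on-failure behavior, so the
shared-acquire fast path can be sketched in userspace as below; the
LK_* values are invented for illustration and LK_CAN_SHARE() is reduced
to a single bit test.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LK_SHARE	((uintptr_t)0x01)	/* hypothetical: shareable */
#define LK_ONE_SHARER	((uintptr_t)0x10)	/* hypothetical: count unit */

/* Mirrors lockmgr_slock_try(): bump the sharer count if allowed. */
static bool
slock_try(_Atomic uintptr_t *lock, uintptr_t *xp)
{

	*xp = atomic_load_explicit(lock, memory_order_relaxed);
	while ((*xp & LK_SHARE) != 0) {	/* stand-in for LK_CAN_SHARE() */
		/*
		 * On failure *xp is refreshed with the observed value,
		 * just like atomic_fcmpset_acq_ptr(9).
		 */
		if (atomic_compare_exchange_weak_explicit(lock, xp,
		    *xp + LK_ONE_SHARER, memory_order_acquire,
		    memory_order_relaxed))
			return (true);
	}
	return (false);
}

int
main(void)
{
	_Atomic uintptr_t lock = LK_SHARE;	/* free and shareable */
	uintptr_t x;

	printf("acquired: %d, word: %#jx\n", slock_try(&lock, &x),
	    (uintmax_t)atomic_load(&lock));
	return (0);
}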

Modified: head/sys/kern/vfs_default.c
==============================================================================
--- head/sys/kern/vfs_default.c	Sun Feb 12 09:27:34 2017	(r313682)
+++ head/sys/kern/vfs_default.c	Sun Feb 12 09:49:44 2017	(r313683)
@@ -520,10 +520,11 @@ vop_stdlock(ap)
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
+	struct mtx *ilk;
 
-	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
-	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
-	    ap->a_line));
+	ilk = VI_MTX(vp);
+	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
+	    (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line));
 }
 
 /* See above. */
@@ -535,8 +536,11 @@ vop_stdunlock(ap)
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
+	struct mtx *ilk;
 
-	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
+	ilk = VI_MTX(vp);
+	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
+	    (ilk != NULL) ? &ilk->lock_object : NULL));
 }
 
 /* See above. */
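
Interlock handling is worth noting: the new entry points take a bare
struct lock_object (hence the &ilk->lock_object above), and when
LK_INTERLOCK is set they drop it through its lock class once the
lockmgr lock word has been dealt with, on both the fast path and the
__lockmgr_args() fallback.  A hypothetical caller holding a mutex
across the acquire would look roughly like this (sketch only, names
invented):

static int
example_acquire(struct lock *lk, struct mtx *m)
{

	mtx_lock(m);
	/* ... inspect state protected by m ... */

	/*
	 * With LK_INTERLOCK, m is released via its lock class on the
	 * way out, whether or not the lock was acquired.
	 */
	return (lockmgr_lock_fast_path(lk, LK_SHARED | LK_INTERLOCK,
	    &m->lock_object, LOCK_FILE, LOCK_LINE));
}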

Modified: head/sys/sys/lockmgr.h
==============================================================================
--- head/sys/sys/lockmgr.h	Sun Feb 12 09:27:34 2017	(r313682)
+++ head/sys/sys/lockmgr.h	Sun Feb 12 09:49:44 2017	(r313683)
@@ -68,6 +68,10 @@ struct thread;
  */
 int	 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	    const char *wmesg, int prio, int timo, const char *file, int line);
+int	 lockmgr_lock_fast_path(struct lock *lk, u_int flags,
+	    struct lock_object *ilk, const char *file, int line);
+int	 lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
+	    struct lock_object *ilk);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line);
 #endif
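
For completeness, a minimal and entirely hypothetical consumer of the
two new prototypes; the vop_stdlock()/vop_stdunlock() conversions above
are the real in-tree users.  The headers and the PVFS sleep priority
are assumptions, not taken from this commit:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/priority.h>

static struct lock examplelock;

static void
example(void)
{

	lockinit(&examplelock, PVFS, "explk", 0, 0);

	/* One atomic on the uncontended path, __lockmgr_args() otherwise. */
	(void)lockmgr_lock_fast_path(&examplelock, LK_EXCLUSIVE, NULL,
	    LOCK_FILE, LOCK_LINE);
	(void)lockmgr_unlock_fast_path(&examplelock, 0, NULL);

	lockdestroy(&examplelock);
}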


