Date:      Tue, 21 Jul 2020 14:42:22 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r363394 - in head/sys: kern sys
Message-ID:  <202007211442.06LEgM0R024701@repo.freebsd.org>

Author: mjg
Date: Tue Jul 21 14:42:22 2020
New Revision: 363394
URL: https://svnweb.freebsd.org/changeset/base/363394

Log:
  lockmgr: denote recursion with a bit in lock value
  
  This reduces excessive reads from the lock.
  
  Tested by:	pho
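
For context, here is a minimal standalone sketch of the idea. It is not
FreeBSD's lockmgr: the names (sketch_lock, sketch_unlock, SK_*) are made up
and C11 <stdatomic.h> stands in for the kernel's atomic(9) primitives. The
point it illustrates is that once recursion is denoted by a bit in the lock
word, the unlock fast path can decide everything from the value it already
read, without an extra load of the recursion counter.

	#include <stdatomic.h>
	#include <stdint.h>

	#define	SK_UNLOCKED	((uintptr_t)0)
	#define	SK_RECURSED	((uintptr_t)0x10)	/* recursion bit in the lock word */
	#define	SK_FLAGMASK	((uintptr_t)0x1f)
	#define	SK_HOLDER(x)	((x) & ~SK_FLAGMASK)

	struct sketch_lock {
		_Atomic uintptr_t word;	/* owner id (aligned, low bits clear) | flags */
		unsigned int recurse;	/* depth; touched only by the owner */
	};

	/* Owner taking the lock again: record recursion in the lock word too. */
	static void
	sketch_recurse(struct sketch_lock *l)
	{

		atomic_fetch_or_explicit(&l->word, SK_RECURSED,
		    memory_order_relaxed);
		l->recurse++;
	}

	/* Unlock: one compare-and-swap on the value we expect (owned by us,
	 * no flag bits) covers the common case; only the failure path needs
	 * to look at the recursion counter. */
	static void
	sketch_unlock(struct sketch_lock *l, uintptr_t self)
	{
		uintptr_t x;

		x = self;
		if (atomic_compare_exchange_strong_explicit(&l->word, &x,
		    SK_UNLOCKED, memory_order_release, memory_order_relaxed))
			return;
		/* Recursed (a real lock would also handle waiters here). */
		if ((x & SK_RECURSED) != 0 && --l->recurse == 0)
			atomic_fetch_and_explicit(&l->word, ~SK_RECURSED,
			    memory_order_relaxed);
	}

The kern_lock.c hunks below do the same thing with LK_WRITER_RECURSED: set the
bit when recursing in lockmgr_xlock_hard(), clear it when lk_recurse drops back
to zero, and let lockmgr_unlock()'s fast path fire only when the lock value is
exactly the owning tid.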

Modified:
  head/sys/kern/kern_lock.c
  head/sys/sys/lockmgr.h

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Tue Jul 21 14:41:25 2020	(r363393)
+++ head/sys/kern/kern_lock.c	Tue Jul 21 14:42:22 2020	(r363394)
@@ -736,6 +736,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struc
 			panic("%s: recursing on non recursive lockmgr %p "
 			    "@ %s:%d\n", __func__, lk, file, line);
 		}
+		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
 		lk->lk_recurse++;
 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
@@ -1039,9 +1040,11 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_i
 	 * The lock is held in exclusive mode.
 	 * If the lock is recursed also, then unrecurse it.
 	 */
-	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
+	if (lockmgr_recursed_v(x)) {
 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
 		lk->lk_recurse--;
+		if (lk->lk_recurse == 0)
+			atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
 		goto out;
 	}
 	if (tid != LK_KERNPROC)
@@ -1187,9 +1190,8 @@ lockmgr_unlock(struct lock *lk)
 	} else {
 		tid = (uintptr_t)curthread;
 		lockmgr_note_exclusive_release(lk, file, line);
-		if (!lockmgr_recursed(lk) &&
-		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
-			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
+		if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
+			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
 		} else {
 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
 		}

Modified: head/sys/sys/lockmgr.h
==============================================================================
--- head/sys/sys/lockmgr.h	Tue Jul 21 14:41:25 2020	(r363393)
+++ head/sys/sys/lockmgr.h	Tue Jul 21 14:42:22 2020	(r363394)
@@ -42,13 +42,14 @@
 #define	LK_SHARED_WAITERS		0x02
 #define	LK_EXCLUSIVE_WAITERS		0x04
 #define	LK_EXCLUSIVE_SPINNERS		0x08
+#define	LK_WRITER_RECURSED		0x10
 #define	LK_ALL_WAITERS							\
 	(LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
 #define	LK_FLAGMASK							\
-	(LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)
+	(LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS | LK_WRITER_RECURSED)
 
 #define	LK_HOLDER(x)			((x) & ~LK_FLAGMASK)
-#define	LK_SHARERS_SHIFT		4
+#define	LK_SHARERS_SHIFT		5
 #define	LK_SHARERS(x)			(LK_HOLDER(x) >> LK_SHARERS_SHIFT)
 #define	LK_SHARERS_LOCK(x)		((x) << LK_SHARERS_SHIFT | LK_SHARE)
 #define	LK_ONE_SHARER			(1 << LK_SHARERS_SHIFT)
@@ -131,8 +132,10 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct 
 	    LOCK_FILE, LOCK_LINE)
 #define	lockmgr_disown(lk)						\
 	_lockmgr_disown((lk), LOCK_FILE, LOCK_LINE)
+#define	lockmgr_recursed_v(v)						\
+	(v & LK_WRITER_RECURSED)
 #define	lockmgr_recursed(lk)						\
-	((lk)->lk_recurse != 0)
+	lockmgr_recursed_v((lk)->lk_lock)
 #define	lockmgr_rw(lk, flags, ilk)					\
 	_lockmgr_args_rw((lk), (flags), (ilk), LK_WMESG_DEFAULT,	\
 	    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
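
As a quick sanity check of the new bit layout in lockmgr.h (an illustrative
userland snippet, not part of the commit): LK_WRITER_RECURSED adds a fifth
flag bit, which is why LK_SHARERS_SHIFT moves from 4 to 5 so that the shared
holder count still sits entirely above the flag bits.

	#include <assert.h>

	#define	LK_SHARE		0x01
	#define	LK_SHARED_WAITERS	0x02
	#define	LK_EXCLUSIVE_WAITERS	0x04
	#define	LK_EXCLUSIVE_SPINNERS	0x08
	#define	LK_WRITER_RECURSED	0x10
	#define	LK_ALL_WAITERS		(LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
	#define	LK_FLAGMASK		(LK_SHARE | LK_ALL_WAITERS |		\
					    LK_EXCLUSIVE_SPINNERS | LK_WRITER_RECURSED)
	#define	LK_HOLDER(x)		((x) & ~LK_FLAGMASK)
	#define	LK_SHARERS_SHIFT	5
	#define	LK_SHARERS(x)		(LK_HOLDER(x) >> LK_SHARERS_SHIFT)
	#define	LK_SHARERS_LOCK(x)	((x) << LK_SHARERS_SHIFT | LK_SHARE)

	int
	main(void)
	{

		/* Three shared holders: (3 << 5) | LK_SHARE == 0x61. */
		assert(LK_SHARERS_LOCK(3) == 0x61);
		assert(LK_SHARERS(LK_SHARERS_LOCK(3)) == 3);
		/* An exclusive owner (an aligned thread pointer, e.g. 0x1000)
		 * with the new recursion bit set: LK_HOLDER() still recovers
		 * the owner exactly. */
		assert(LK_HOLDER(0x1000 | LK_WRITER_RECURSED) == 0x1000);
		return (0);
	}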


