From: Mateusz Guzik <mjg@FreeBSD.org>
Date: Wed, 22 Jul 2020 12:30:31 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r363415 - in head/sys: kern sys ufs/ffs
Message-Id: <202007221230.06MCUVb5039621@repo.freebsd.org>

Author: mjg
Date: Wed Jul 22 12:30:31 2020
New Revision: 363415
URL: https://svnweb.freebsd.org/changeset/base/363415

Log:
  lockmgr: add adaptive spinning

  The implementation is very conservative: it spins only when LK_ADAPTIVE
  is passed, only when the lock is held exclusive, and never when any
  waiters are present. The buffer cache remains non-spinning.

  This reduces total sleep times during buildworld and similar workloads,
  but it does not shorten total real time (the culprits are contention in
  the vm subsystem, along with the slock + upgrade pattern, which is not
  covered).

  For microbenchmarks: open3_processes -t 52 (open/close of the same file
  for writing), in ops/s:
  before: 258845
  after:  801638

  Reviewed by:	kib
  Tested by:	pho
  Differential Revision:	https://reviews.freebsd.org/D25753
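To make the policy concrete before diving into the diff: condensed from the
two helpers added below (closest to the exclusive variant,
lockmgr_xlock_adaptive()), the spin loop has roughly the following shape.
This is an illustrative sketch, not the committed code; the real helpers
differ in check order and in the shared-lock success test.

static bool
adaptive_spin_sketch(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
	struct thread *owner;
	uintptr_t x;

	x = *xp;
	owner = (struct thread *)LK_HOLDER(x);
	for (;;) {
		/* Only spin on a lock held exclusive by a live thread. */
		if (owner == NULL || owner == (struct thread *)LK_KERNPROC)
			return (false);
		if ((x & LK_SHARE) != 0 && LK_SHARERS(x) > 0)
			return (false);
		/* Bail if the owner went off-CPU or waiters queued up. */
		if (!TD_IS_RUNNING(owner) || (x & LK_ALL_WAITERS) != 0)
			return (false);
		lock_delay(lda);		/* backoff busy-wait */
		x = lockmgr_read_value(lk);
		if (x == LK_UNLOCKED) {		/* owner let go; retry */
			*xp = x;
			return (true);
		}
		owner = (struct thread *)LK_HOLDER(x);
	}
}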
Modified:
  head/sys/kern/kern_lock.c
  head/sys/sys/lockmgr.h
  head/sys/ufs/ffs/ffs_vnops.c

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Wed Jul 22 10:00:13 2020	(r363414)
+++ head/sys/kern/kern_lock.c	Wed Jul 22 12:30:31 2020	(r363415)
@@ -167,6 +167,12 @@ struct lock_class lock_class_lockmgr = {
 #endif
 };
 
+static __read_mostly bool lk_adaptive = true;
+static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
+SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
+    0, "");
+#define lockmgr_delay locks_delay
+
 struct lockmgr_wait {
 	const char *iwmesg;
 	int ipri;
@@ -515,7 +521,6 @@ lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int
 	 * waiters, if we fail to acquire the shared lock
 	 * loop back and retry.
 	 */
-	*xp = lockmgr_read_value(lk);
 	while (LK_CAN_SHARE(*xp, flags, fp)) {
 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
 		    *xp + LK_ONE_SHARER)) {
@@ -541,6 +546,38 @@ lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
 	return (false);
 }
 
+static bool
+lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
+    int flags)
+{
+	struct thread *owner;
+	uintptr_t x;
+
+	x = *xp;
+	MPASS(x != LK_UNLOCKED);
+	owner = (struct thread *)LK_HOLDER(x);
+	for (;;) {
+		MPASS(owner != curthread);
+		if (owner == (struct thread *)LK_KERNPROC)
+			return (false);
+		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
+			return (false);
+		if (owner == NULL)
+			return (false);
+		if (!TD_IS_RUNNING(owner))
+			return (false);
+		if ((x & LK_ALL_WAITERS) != 0)
+			return (false);
+		lock_delay(lda);
+		x = lockmgr_read_value(lk);
+		if (LK_CAN_SHARE(x, flags, false)) {
+			*xp = x;
+			return (true);
+		}
+		owner = (struct thread *)LK_HOLDER(x);
+	}
+}
+
 static __noinline int
 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *file, int line, struct lockmgr_wait *lwa)
@@ -557,6 +594,7 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struc
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
+	struct lock_delay_arg lda;
 
 	if (KERNEL_PANICKED())
 		goto out;
@@ -566,9 +604,31 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struc
 	if (LK_CAN_WITNESS(flags))
 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
+	lock_delay_arg_init(&lda, &lockmgr_delay);
+	if (!lk_adaptive)
+		flags &= ~LK_ADAPTIVE;
+	x = lockmgr_read_value(lk);
+	/*
+	 * The lock may already be locked exclusive by curthread,
+	 * avoid deadlock.
+	 */
+	if (LK_HOLDER(x) == tid) {
+		LOCK_LOG2(lk,
+		    "%s: %p already held in exclusive mode",
+		    __func__, lk);
+		error = EDEADLK;
+		goto out;
+	}
+
 	for (;;) {
 		if (lockmgr_slock_try(lk, &x, flags, false))
 			break;
+
+		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
+			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
+				continue;
+		}
+
 #ifdef HWPMC_HOOKS
 		PMC_SOFT_CALL( , , lock, failed);
 #endif
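The helper above leans on the kernel's generic backoff machinery
(lockmgr_delay aliases the system-wide locks_delay tuning) rather than
hammering the lock word in a tight loop. Stripped down, the pattern is the
sketch below, where try_acquire() is a hypothetical stand-in for the real
fast-path attempt:

	struct lock_delay_arg lda;

	lock_delay_arg_init(&lda, &lockmgr_delay);
	while (!try_acquire(lk))	/* hypothetical fast-path attempt */
		lock_delay(&lda);	/* cpu_spinwait() with growing backoff */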
@@ -576,18 +636,6 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struc
 		    &contested, &waittime);
 
 		/*
-		 * If the lock is already held by curthread in
-		 * exclusive way avoid a deadlock.
-		 */
-		if (LK_HOLDER(x) == tid) {
-			LOCK_LOG2(lk,
-			    "%s: %p already held in exclusive mode",
-			    __func__, lk);
-			error = EDEADLK;
-			break;
-		}
-
-		/*
 		 * If the lock is expected to not sleep just give up
 		 * and return.
 		 */
@@ -660,6 +708,7 @@ retry_sleepq:
 		}
 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 		    __func__, lk);
+		x = lockmgr_read_value(lk);
 	}
 	if (error == 0) {
 #ifdef KDTRACE_HOOKS
@@ -682,6 +731,37 @@ out:
 	return (error);
 }
 
+static bool
+lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
+{
+	struct thread *owner;
+	uintptr_t x;
+
+	x = *xp;
+	MPASS(x != LK_UNLOCKED);
+	owner = (struct thread *)LK_HOLDER(x);
+	for (;;) {
+		MPASS(owner != curthread);
+		if (owner == NULL)
+			return (false);
+		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
+			return (false);
+		if (owner == (struct thread *)LK_KERNPROC)
+			return (false);
+		if (!TD_IS_RUNNING(owner))
+			return (false);
+		if ((x & LK_ALL_WAITERS) != 0)
+			return (false);
+		lock_delay(lda);
+		x = lockmgr_read_value(lk);
+		if (x == LK_UNLOCKED) {
+			*xp = x;
+			return (true);
+		}
+		owner = (struct thread *)LK_HOLDER(x);
+	}
+}
+
 static __noinline int
 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *file, int line, struct lockmgr_wait *lwa)
@@ -699,6 +779,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struc
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
+	struct lock_delay_arg lda;
 
 	if (KERNEL_PANICKED())
 		goto out;
@@ -747,10 +828,19 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struc
 		goto out;
 	}
 
+	x = LK_UNLOCKED;
+	lock_delay_arg_init(&lda, &lockmgr_delay);
+	if (!lk_adaptive)
+		flags &= ~LK_ADAPTIVE;
 	for (;;) {
-		if (lk->lk_lock == LK_UNLOCKED &&
-		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
-			break;
+		if (x == LK_UNLOCKED) {
+			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
+				break;
+		}
+		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
+			if (lockmgr_xlock_adaptive(&lda, lk, &x))
+				continue;
+		}
 #ifdef HWPMC_HOOKS
 		PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -853,6 +943,7 @@ retry_sleepq:
 		}
 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 		    __func__, lk);
+		x = lockmgr_read_value(lk);
 	}
 	if (error == 0) {
 #ifdef KDTRACE_HOOKS
@@ -954,6 +1045,7 @@ lockmgr_lock_flags(struct lock *lk, u_int flags, struc
 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
 			break;
+		x = lockmgr_read_value(lk);
 		if (lockmgr_slock_try(lk, &x, flags, true)) {
 			lockmgr_note_shared_acquire(lk, 0, 0,
 			    file, line, flags);
@@ -1139,12 +1231,13 @@ lockmgr_slock(struct lock *lk, u_int flags, const char
 	if (LK_CAN_WITNESS(flags))
 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
 		    file, line, NULL);
+	x = lockmgr_read_value(lk);
 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
 		return (0);
 	}
 
-	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
+	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
 }
 
 int
@@ -1165,7 +1258,7 @@ lockmgr_xlock(struct lock *lk, u_int flags, const char
 		return (0);
 	}
 
-	return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
+	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
 }
 
 int
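One detail worth calling out in lockmgr_xlock_hard() above: the old
atomic_cmpset_acq_ptr() fast path became atomic_fcmpset_acq_ptr(). The
fcmpset variant writes the observed lock word back through its second
argument on failure, so the loop can hand a fresh value straight to
lockmgr_xlock_adaptive() without an extra read. A standalone illustration of
the idiom (tid and lk as in the diff; spin_or_sleep() is hypothetical):

	uintptr_t x;

	x = LK_UNLOCKED;
	for (;;) {
		if (x == LK_UNLOCKED &&
		    atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
			break;			/* acquired */
		/*
		 * On failure fcmpset updated x with the current lock
		 * word, so the slow path can consume it without another
		 * explicit read here.
		 */
		spin_or_sleep(lk, &x);		/* hypothetical slow path */
	}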
Modified: head/sys/sys/lockmgr.h
==============================================================================
--- head/sys/sys/lockmgr.h	Wed Jul 22 10:00:13 2020	(r363414)
+++ head/sys/sys/lockmgr.h	Wed Jul 22 12:30:31 2020	(r363415)
@@ -170,6 +170,7 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct
 #define	LK_SLEEPFAIL	0x000800
 #define	LK_TIMELOCK	0x001000
 #define	LK_NODDLKTREAT	0x002000
+#define	LK_ADAPTIVE	0x004000
 
 /*
  * Operations for lockmgr().

Modified: head/sys/ufs/ffs/ffs_vnops.c
==============================================================================
--- head/sys/ufs/ffs/ffs_vnops.c	Wed Jul 22 10:00:13 2020	(r363414)
+++ head/sys/ufs/ffs/ffs_vnops.c	Wed Jul 22 12:30:31 2020	(r363415)
@@ -445,6 +445,7 @@ ffs_lock(ap)
 	struct lock *lkp;
 	int result;
 
+	ap->a_flags |= LK_ADAPTIVE;
 	switch (ap->a_flags & LK_TYPE_MASK) {
 	case LK_SHARED:
 	case LK_UPGRADE:
@@ -482,6 +483,7 @@ ffs_lock(ap)
 	}
 	return (result);
 #else
+	ap->a_flags |= LK_ADAPTIVE;
 	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
 #endif
 }
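For completeness: consumers opt in the way the ffs_lock() hunks above do, by
ORing LK_ADAPTIVE into the request, and the mechanism can be disabled
globally at runtime through the debug.lockmgr.adaptive_spinning sysctl added
in kern_lock.c. A minimal sketch with a hypothetical lock (foo_lock and the
lockinit() parameters are illustrative):

	static struct lock foo_lock;

	/* Once, at setup time. */
	lockinit(&foo_lock, PVFS, "foolk", 0, 0);

	/* Per operation: request adaptive spinning on contention. */
	lockmgr(&foo_lock, LK_EXCLUSIVE | LK_ADAPTIVE, NULL);
	/* ... exclusive section ... */
	lockmgr(&foo_lock, LK_RELEASE, NULL);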