From owner-svn-src-head@freebsd.org  Fri Oct 20 00:30:36 2017
Message-Id: <201710200030.v9K0UZLs037478@repo.freebsd.org>
From: Mateusz Guzik <mjg@FreeBSD.org>
Date: Fri, 20 Oct 2017 00:30:35 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r324778 - in head/sys: kern sys
X-SVN-Group: head
X-SVN-Commit-Author: mjg
X-SVN-Commit-Revision: 324778
X-SVN-Commit-Repository: base
X-SVN-Commit-Paths: in head/sys: kern sys
List-Id: SVN commit messages for the src tree for head/-current

Author: mjg
Date: Fri Oct 20 00:30:35 2017
New Revision: 324778
URL: https://svnweb.freebsd.org/changeset/base/324778

Log:
  mtx: clean up locking spin mutexes

  1) shorten the fast path by pushing the lockstat probe to the slow path
  2) test for kernel panic only after it turns out we will have to spin,
     in particular test only after we know we are not recursing

  MFC after:	1 week

Modified:
  head/sys/kern/kern_mutex.c
  head/sys/sys/mutex.h
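Before the diff below, a short illustration of point 1 of the log: the following is a
minimal userland sketch (not FreeBSD kernel code) of the fast-path/slow-path split the
change applies. The uncontended acquire is a single compare-and-swap, and the
lockstat-style probe fires only from the slow path; an enabled probe forces the slow
path, much as LOCKSTAT_PROFILE_ENABLED() does in the reworked __mtx_lock_spin() macro
further down. The names toy_lock, toy_lock_slow, toy_lock_word and toy_probe_enabled
are hypothetical, and C11 atomics stand in for _mtx_obtain_lock_fetch().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_UNOWNED	((uintptr_t)0)

static _Atomic uintptr_t toy_lock_word = TOY_UNOWNED;
static bool toy_probe_enabled;	/* hypothetical stand-in for LOCKSTAT_PROFILE_ENABLED() */

/* Slow path: spin until the CAS succeeds, then fire the "probe" from here. */
static void
toy_lock_slow(uintptr_t tid)
{
	uintptr_t v = TOY_UNOWNED;

	while (!atomic_compare_exchange_weak(&toy_lock_word, &v, tid))
		v = TOY_UNOWNED;	/* reset the expected value and retry */
	if (toy_probe_enabled)
		printf("probe: %#lx acquired the lock\n", (unsigned long)tid);
}

/*
 * Fast path: a single CAS.  Anything rare (contention, or profiling being
 * enabled) takes the same out-of-line branch, which is what keeps the common
 * case short; the kernel additionally wraps the condition in __predict_false().
 */
static inline void
toy_lock(uintptr_t tid)
{
	uintptr_t v = TOY_UNOWNED;

	if (toy_probe_enabled ||
	    !atomic_compare_exchange_strong(&toy_lock_word, &v, tid))
		toy_lock_slow(tid);
}

static inline void
toy_unlock(void)
{
	atomic_store(&toy_lock_word, TOY_UNOWNED);
}

int
main(void)
{
	toy_lock((uintptr_t)1);		/* fast path: one CAS, no probe */
	toy_unlock();
	toy_probe_enabled = true;
	toy_lock((uintptr_t)1);		/* forced through the slow path */
	toy_unlock();
	return (0);
}

The payoff of this shape is that the common case compiles to one atomic operation plus
an untaken branch, while contention, profiling and (in the real code) recursion all
share the same out-of-line path.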
Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c	Fri Oct 20 00:29:39 2017	(r324777)
+++ head/sys/kern/kern_mutex.c	Fri Oct 20 00:30:35 2017	(r324778)
@@ -289,6 +289,7 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts,
     int line)
 {
 	struct mtx *m;
+	uintptr_t tid, v;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -308,7 +309,14 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts,
 		opts &= ~MTX_RECURSE;
 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
 	    file, line, NULL);
-	__mtx_lock_spin(m, curthread, opts, file, line);
+	spinlock_enter();
+	tid = (uintptr_t)curthread;
+	v = MTX_UNOWNED;
+	if (!_mtx_obtain_lock_fetch(m, &v, tid))
+		_mtx_lock_spin(m, v, opts, file, line);
+	else
+		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
+		    m, 0, 0, file, line);
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -682,12 +690,18 @@ _mtx_lock_spin_failed(struct mtx *m)
  * This is only called if we need to actually spin for the lock.  Recursion
  * is handled inline.
  */
+#if LOCK_DEBUG > 0
 void
-_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-    int opts, const char *file, int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+    const char *file, int line)
+#else
+void
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
+#endif
 {
 	struct mtx *m;
 	struct lock_delay_arg lda;
+	uintptr_t tid;
 #ifdef LOCK_PROFILING
 	int contested = 0;
 	uint64_t waittime = 0;
@@ -699,10 +713,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t
 	int doing_lockprof;
 #endif
 
-	if (SCHEDULER_STOPPED())
-		return;
-
-	lock_delay_arg_init(&lda, &mtx_spin_delay);
+	tid = (uintptr_t)curthread;
 	m = mtxlock2mtx(c);
 
 	if (__predict_false(v == MTX_UNOWNED))
@@ -712,6 +723,11 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t
 		m->mtx_recurse++;
 		return;
 	}
+
+	if (SCHEDULER_STOPPED())
+		return;
+
+	lock_delay_arg_init(&lda, &mtx_spin_delay);
 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

Modified: head/sys/sys/mutex.h
==============================================================================
--- head/sys/sys/mutex.h	Fri Oct 20 00:29:39 2017	(r324777)
+++ head/sys/sys/mutex.h	Fri Oct 20 00:30:35 2017	(r324778)
@@ -105,9 +105,13 @@ void	__mtx_unlock_sleep(volatile uintptr_t *c);
 #endif
 
 #ifdef SMP
-void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-	    int opts, const char *file, int line);
+#if LOCK_DEBUG > 0
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
+#else
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
 #endif
+#endif
 void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
@@ -154,9 +158,14 @@ void	thread_lock_flags_(struct thread *, int, const ch
 	__mtx_unlock_sleep(&(m)->mtx_lock)
 #endif
 #ifdef SMP
-#define	_mtx_lock_spin(m, v, t, o, f, l)				\
-	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
+#if LOCK_DEBUG > 0
+#define	_mtx_lock_spin(m, v, o, f, l)					\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
+#else
+#define	_mtx_lock_spin(m, v, o, f, l)					\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
 #endif
+#endif
#define	_mtx_lock_flags(m, o, f, l)					\
 	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
 #define	_mtx_unlock_flags(m, o, f, l)					\
@@ -219,11 +228,9 @@ void	thread_lock_flags_(struct thread *, int, const ch
 	uintptr_t _v = MTX_UNOWNED;					\
 									\
 	spinlock_enter();						\
-	if (!_mtx_obtain_lock_fetch((mp), &_v, _tid))			\
-		_mtx_lock_spin((mp), _v, _tid, (opts), (file), (line)); \
-	else								\
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
-		    mp, 0, 0, file, line);				\
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
+	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
+		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
 } while (0)
 
 #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
 	uintptr_t _tid = (uintptr_t)(tid);				\
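As a sketch of point 2 of the log, the following standalone program (again not kernel
code; toy_mtx, toy_scheduler_stopped and toy_delay_init are hypothetical stand-ins for
struct mtx, SCHEDULER_STOPPED() and lock_delay_arg_init()) mirrors the reordered slow
path in _mtx_lock_spin_cookie(): a recursive acquire bumps the count and returns before
the panicked-scheduler check, so that check and the spin-delay setup are only paid once
it is clear the thread will actually spin.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_mtx {
	_Atomic uintptr_t	owner;		/* 0 means unowned */
	unsigned		recurse;	/* recursion depth */
};

static bool toy_scheduler_stopped;	/* hypothetical stand-in for SCHEDULER_STOPPED() */

static void
toy_delay_init(void)			/* hypothetical stand-in for lock_delay_arg_init() */
{
}

/*
 * Slow path, entered with v holding the lock word observed by the failed
 * fast-path CAS.  Order mirrors the new code: recursion first, panic check
 * only once spinning is unavoidable.
 */
static void
toy_lock_slow(struct toy_mtx *m, uintptr_t v, uintptr_t tid)
{
	uintptr_t expected;

	/* Recursive acquire: no spinning, so no panic check needed. */
	if (v == tid) {
		m->recurse++;
		return;
	}

	/* We are definitely going to spin; now the check is worth doing. */
	if (toy_scheduler_stopped)
		return;

	toy_delay_init();
	expected = 0;
	while (!atomic_compare_exchange_weak(&m->owner, &expected, tid))
		expected = 0;		/* reset the expected value and spin again */
}

int
main(void)
{
	struct toy_mtx m = { .owner = 0, .recurse = 0 };
	uintptr_t tid = 1;

	atomic_store(&m.owner, tid);	/* pretend the fast path already owns it */
	toy_lock_slow(&m, tid, tid);	/* recursive acquire: no spin, no panic check */
	return (m.recurse == 1 ? 0 : 1);
}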