From owner-svn-src-head@freebsd.org  Sun Dec 31 00:37:52 2017
Return-Path: <owner-svn-src-head@freebsd.org>
Delivered-To: svn-src-head@mailman.ysv.freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:1900:2254:206a::19:1])
 by mailman.ysv.freebsd.org (Postfix) with ESMTP id 25951EB57A9;
 Sun, 31 Dec 2017 00:37:52 +0000 (UTC) (envelope-from mjg@FreeBSD.org)
Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client did not present a certificate)
 by mx1.freebsd.org (Postfix) with ESMTPS id F11BE75B97;
 Sun, 31 Dec 2017 00:37:51 +0000 (UTC) (envelope-from mjg@FreeBSD.org)
Received: from repo.freebsd.org ([127.0.1.37])
 by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id vBV0bpSS022265;
 Sun, 31 Dec 2017 00:37:51 GMT (envelope-from mjg@FreeBSD.org)
Received: (from mjg@localhost)
 by repo.freebsd.org (8.15.2/8.15.2/Submit) id vBV0bpGV022264;
 Sun, 31 Dec 2017 00:37:51 GMT (envelope-from mjg@FreeBSD.org)
Message-Id: <201712310037.vBV0bpGV022264@repo.freebsd.org>
X-Authentication-Warning: repo.freebsd.org: mjg set sender to mjg@FreeBSD.org
 using -f
From: Mateusz Guzik <mjg@FreeBSD.org>
Date: Sun, 31 Dec 2017 00:37:51 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
 svn-src-head@freebsd.org
Subject: svn commit: r327397 - head/sys/kern
X-SVN-Group: head
X-SVN-Commit-Author: mjg
X-SVN-Commit-Paths: head/sys/kern
X-SVN-Commit-Revision: 327397
X-SVN-Commit-Repository: base
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-head@freebsd.org
X-Mailman-Version: 2.1.25
Precedence: list
List-Id: SVN commit messages for the src tree for head/-current
X-List-Received-Date: Sun, 31 Dec 2017 00:37:52 -0000

Author: mjg
Date: Sun Dec 31 00:37:50 2017
New Revision: 327397
URL: https://svnweb.freebsd.org/changeset/base/327397

Log:
  sx: read the SX_NOADAPTIVE flag and Giant ownership only once

  These used to be read multiple times while waiting for the lock to
  become free, which could generate completely avoidable cache-coherency
  traffic.
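The change is mechanical: both values are invariant while a thread waits, so
they are latched before the retry loop (the `adaptive` local, plus a single
up-front GIANT_SAVE()) instead of being re-read at every spin or sleep site.
Below is a minimal userspace sketch of the same hoisting pattern; it assumes
only C11 atomics and POSIX sched_yield(), and the `toylock`/`LK_NOSPIN` names
are invented for illustration, not taken from the kernel:

/*
 * Standalone sketch of the hoisting applied in this commit: a flag that
 * is immutable after lock initialization is read into a local once,
 * before the retry loop, rather than on every iteration.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define	LK_NOSPIN	0x01		/* hypothetical "never spin" flag */

struct toylock {
	atomic_uint	state;		/* 0 == unlocked, 1 == locked */
	unsigned int	flags;		/* immutable after initialization */
};

static void
toylock_lock(struct toylock *lk)
{
	unsigned int expected;
	bool spin;

	/*
	 * Hoisted read, same idea as the "adaptive" local in the diff
	 * below: flags cannot change while we wait, so keep the answer
	 * in a register instead of re-reading memory in the loop.
	 */
	spin = (lk->flags & LK_NOSPIN) == 0;

	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak(&lk->state, &expected, 1))
			break;		/* acquired */
		if (!spin)
			sched_yield();	/* crude stand-in for sleeping */
	}
}

int
main(void)
{
	struct toylock lk = { .state = 0, .flags = 0 };

	toylock_lock(&lk);
	printf("locked: state=%u\n", (unsigned int)atomic_load(&lk.state));
	return (0);
}

The payoff comes from the flag typically sharing a cache line with the
frequently written lock word: re-reading it on every iteration can turn each
write by the owner into an extra miss for every waiter, while the hoisted
copy stays in a register.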
Modified:
  head/sys/kern/kern_sx.c

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Sun Dec 31 00:35:11 2017	(r327396)
+++ head/sys/kern/kern_sx.c	Sun Dec 31 00:37:50 2017	(r327397)
@@ -91,7 +91,7 @@ PMC_SOFT_DECLARE( , , lock, failed);
 	WITNESS_SAVE_DECL(Giant)					\
 
 #define	GIANT_SAVE(work) do {						\
-	if (mtx_owned(&Giant)) {					\
+	if (__predict_false(mtx_owned(&Giant))) {			\
 		work++;							\
 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
 		while (mtx_owned(&Giant)) {				\
@@ -533,6 +533,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -581,6 +582,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
 
+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -597,6 +602,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif
 
 	for (;;) {
 		if (x == SX_LOCK_UNLOCKED) {
@@ -604,67 +612,68 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 				break;
 			continue;
 		}
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the lock is write locked and the owner is
 		 * running on another CPU, spin until the owner stops
 		 * running or the state of the lock changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			if ((x & SX_LOCK_SHARED) == 0) {
-				owner = lv_sx_owner(x);
-				if (TD_IS_RUNNING(owner)) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR3(KTR_LOCK,
-						    "%s: spinning on %p held by %p",
-						    __func__, sx, owner);
-					KTR_STATE1(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "spinning",
-					    "lockname:\"%s\"",
-					    sx->lock_object.lo_name);
-					GIANT_SAVE(extra_work);
-					do {
-						lock_delay(&lda);
-						x = SX_READ_VALUE(sx);
-						owner = lv_sx_owner(x);
-					} while (owner != NULL &&
-					    TD_IS_RUNNING(owner));
-					KTR_STATE0(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "running");
-					continue;
-				}
-			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+		if ((x & SX_LOCK_SHARED) == 0) {
+			owner = lv_sx_owner(x);
+			if (TD_IS_RUNNING(owner)) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, sx, owner);
 				KTR_STATE1(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				spintries++;
-				for (i = 0; i < asx_loops; i += n) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR4(KTR_LOCK,
-						    "%s: shared spinning on %p with %u and %u",
-						    __func__, sx, spintries, i);
-					n = SX_SHARERS(x);
-					lock_delay_spin(n);
+				    "lockname:\"%s\"",
+				    sx->lock_object.lo_name);
+				do {
+					lock_delay(&lda);
 					x = SX_READ_VALUE(sx);
-					if ((x & SX_LOCK_SHARED) == 0 ||
-					    SX_SHARERS(x) == 0)
-						break;
-				}
-#ifdef KDTRACE_HOOKS
-				lda.spin_cnt += i;
-#endif
+					owner = lv_sx_owner(x);
+				} while (owner != NULL &&
+				    TD_IS_RUNNING(owner));
 				KTR_STATE0(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "running");
-				if (i != asx_loops)
-					continue;
+				continue;
 			}
+		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			spintries++;
+			for (i = 0; i < asx_loops; i += n) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR4(KTR_LOCK,
+					    "%s: shared spinning on %p with %u and %u",
+					    __func__, sx, spintries, i);
+				n = SX_SHARERS(x);
+				lock_delay_spin(n);
+				x = SX_READ_VALUE(sx);
+				if ((x & SX_LOCK_SHARED) == 0 ||
+				    SX_SHARERS(x) == 0)
+					break;
+			}
+#ifdef KDTRACE_HOOKS
+			lda.spin_cnt += i;
+#endif
+			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname(curthread), "running");
+			if (i != asx_loops)
+				continue;
 		}
 #endif
-
+sleepq:
 		sleepq_lock(&sx->lock_object);
 		x = SX_READ_VALUE(sx);
 retry_sleepq:
@@ -686,8 +695,7 @@ retry_sleepq:
 		 * chain lock.  If so, drop the sleep queue lock and try
 		 * again.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -742,7 +750,6 @@ retry_sleepq:
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -892,6 +899,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -920,6 +928,10 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	lock_delay_arg_init(&lda, NULL);
 #endif
 
+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -936,6 +948,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif
 
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
@@ -944,39 +959,42 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	for (;;) {
 		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
 			break;
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			owner = lv_sx_owner(x);
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				do {
-					lock_delay(&lda);
-					x = SX_READ_VALUE(sx);
-					owner = lv_sx_owner(x);
-				} while (owner != NULL && TD_IS_RUNNING(owner));
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "running");
-				continue;
-			}
+		owner = lv_sx_owner(x);
+		if (TD_IS_RUNNING(owner)) {
+			if (LOCK_LOG_TEST(&sx->lock_object, 0))
+				CTR3(KTR_LOCK,
+				    "%s: spinning on %p held by %p",
+				    __func__, sx, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			do {
+				lock_delay(&lda);
+				x = SX_READ_VALUE(sx);
+				owner = lv_sx_owner(x);
+			} while (owner != NULL && TD_IS_RUNNING(owner));
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
+			continue;
 		}
 #endif
+sleepq:
 		/*
 		 * Some other thread already has an exclusive lock, so
 		 * start the process of blocking.
@@ -999,8 +1017,7 @@ retry_sleepq:
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -1035,7 +1052,6 @@ retry_sleepq:
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
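For reference, the __predict_false() annotation added to GIANT_SAVE() is
FreeBSD's sys/cdefs.h wrapper around the compiler's branch-weighting hint.
A small self-contained illustration follows; only __builtin_expect is real,
and count_nonzero() is invented for the example:

#include <stddef.h>

/* As defined in sys/cdefs.h. */
#define	__predict_false(exp)	__builtin_expect((exp), 0)

/*
 * Tell the compiler the NULL check almost never fires, so the common
 * path falls straight through and the error path is laid out cold.
 */
size_t
count_nonzero(const unsigned char *buf, size_t len)
{
	size_t i, n;

	if (__predict_false(buf == NULL))
		return (0);
	n = 0;
	for (i = 0; i < len; i++)
		if (buf[i] != 0)
			n++;
	return (n);
}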