Date: Sun, 31 Dec 2017 00:37:51 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r327397 - head/sys/kern
Message-ID: <201712310037.vBV0bpGV022264@repo.freebsd.org>

Author: mjg
Date: Sun Dec 31 00:37:50 2017
New Revision: 327397
URL: https://svnweb.freebsd.org/changeset/base/327397

Log:
  sx: read the SX_NOADAPTIVE flag and Giant ownership only once

  These used to be read multiple times when waiting for the lock to become
  free, which had the potential to issue completely avoidable traffic.

Modified:
  head/sys/kern/kern_sx.c

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Sun Dec 31 00:35:11 2017	(r327396)
+++ head/sys/kern/kern_sx.c	Sun Dec 31 00:37:50 2017	(r327397)
@@ -91,7 +91,7 @@ PMC_SOFT_DECLARE( , , lock, failed);
 	WITNESS_SAVE_DECL(Giant)					\

 #define	GIANT_SAVE(work) do {						\
-	if (mtx_owned(&Giant)) {					\
+	if (__predict_false(mtx_owned(&Giant))) {			\
 		work++;							\
 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
 		while (mtx_owned(&Giant)) {				\
@@ -533,6 +533,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -581,6 +582,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -597,6 +602,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif

 	for (;;) {
 		if (x == SX_LOCK_UNLOCKED) {
@@ -604,67 +612,68 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 				break;
 			continue;
 		}
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the lock is write locked and the owner is
 		 * running on another CPU, spin until the owner stops
 		 * running or the state of the lock changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			if ((x & SX_LOCK_SHARED) == 0) {
-				owner = lv_sx_owner(x);
-				if (TD_IS_RUNNING(owner)) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-						    __func__, sx, owner);
-					KTR_STATE1(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "spinning",
-					    "lockname:\"%s\"",
-					    sx->lock_object.lo_name);
-					GIANT_SAVE(extra_work);
-					do {
-						lock_delay(&lda);
-						x = SX_READ_VALUE(sx);
-						owner = lv_sx_owner(x);
-					} while (owner != NULL &&
-					    TD_IS_RUNNING(owner));
-					KTR_STATE0(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "running");
-					continue;
-				}
-			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+		if ((x & SX_LOCK_SHARED) == 0) {
+			owner = lv_sx_owner(x);
+			if (TD_IS_RUNNING(owner)) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, sx, owner);
 				KTR_STATE1(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				spintries++;
-				for (i = 0; i < asx_loops; i += n) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR4(KTR_LOCK,
-				    "%s: shared spinning on %p with %u and %u",
-						    __func__, sx, spintries, i);
-					n = SX_SHARERS(x);
-					lock_delay_spin(n);
+				    "lockname:\"%s\"",
+				    sx->lock_object.lo_name);
+				do {
+					lock_delay(&lda);
 					x = SX_READ_VALUE(sx);
-					if ((x & SX_LOCK_SHARED) == 0 ||
-					    SX_SHARERS(x) == 0)
-						break;
-				}
-#ifdef KDTRACE_HOOKS
-				lda.spin_cnt += i;
-#endif
+					owner = lv_sx_owner(x);
+				} while (owner != NULL &&
+				    TD_IS_RUNNING(owner));
 				KTR_STATE0(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "running");
-				if (i != asx_loops)
-					continue;
+				continue;
 			}
+		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			spintries++;
+			for (i = 0; i < asx_loops; i += n) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR4(KTR_LOCK,
+					    "%s: shared spinning on %p with %u and %u",
+					    __func__, sx, spintries, i);
+				n = SX_SHARERS(x);
+				lock_delay_spin(n);
+				x = SX_READ_VALUE(sx);
+				if ((x & SX_LOCK_SHARED) == 0 ||
+				    SX_SHARERS(x) == 0)
+					break;
+			}
+#ifdef KDTRACE_HOOKS
+			lda.spin_cnt += i;
+#endif
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
+			if (i != asx_loops)
+				continue;
 		}
 #endif
-
+sleepq:
 		sleepq_lock(&sx->lock_object);
 		x = SX_READ_VALUE(sx);
 retry_sleepq:
@@ -686,8 +695,7 @@ retry_sleepq:
 		 * chain lock.  If so, drop the sleep queue lock and try
 		 * again.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -742,7 +750,6 @@ retry_sleepq:
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -892,6 +899,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -920,6 +928,10 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	lock_delay_arg_init(&lda, NULL);
 #endif

+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -936,6 +948,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif

 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
@@ -944,39 +959,42 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	for (;;) {
 		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
 			break;
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			owner = lv_sx_owner(x);
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				do {
-					lock_delay(&lda);
-					x = SX_READ_VALUE(sx);
-					owner = lv_sx_owner(x);
-				} while (owner != NULL && TD_IS_RUNNING(owner));
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "running");
-				continue;
-			}
+		owner = lv_sx_owner(x);
+		if (TD_IS_RUNNING(owner)) {
+			if (LOCK_LOG_TEST(&sx->lock_object, 0))
+				CTR3(KTR_LOCK,
+				    "%s: spinning on %p held by %p",
+				    __func__, sx, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			do {
+				lock_delay(&lda);
+				x = SX_READ_VALUE(sx);
+				owner = lv_sx_owner(x);
+			} while (owner != NULL && TD_IS_RUNNING(owner));
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
+			continue;
 		}
 #endif

+sleepq:
 		/*
 		 * Some other thread already has an exclusive lock, so
 		 * start the process of blocking.
@@ -999,8 +1017,7 @@ retry_sleepq:
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -1035,7 +1052,6 @@ retry_sleepq:
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
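
For readers skimming the diff: the change hoists two reads that are
loop-invariant for the duration of the call -- the SX_NOADAPTIVE bit in
lo_flags and the Giant ownership test inside GIANT_SAVE() -- out of the
contended retry loop, so a waiting thread stops re-issuing loads it
already knows the answer to.  Below is a minimal standalone sketch of
the same pattern, assuming a toy test-and-test-and-set lock; the
toy_lock/TOY_NOSPIN names are hypothetical and not part of kern_sx.c,
and sched_yield() stands in for the sleepqueue path the real code uses.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>

	#define	TOY_NOSPIN	0x01	/* hypothetical: lock opts out of spinning */

	struct toy_lock {
		atomic_int	locked;	/* 0 = free, 1 = held */
		int		flags;	/* set at init time, never written again */
	};

	static void
	toy_lock_acquire(struct toy_lock *lk)
	{
		/*
		 * Read the flag once.  It cannot change while we wait, so
		 * re-reading it on every iteration only issues avoidable
		 * loads; the atomic operations in the loop keep the
		 * compiler from hoisting it on our behalf.
		 */
		bool spin = (lk->flags & TOY_NOSPIN) == 0;
		int expected;

		for (;;) {
			expected = 0;
			if (atomic_compare_exchange_weak(&lk->locked,
			    &expected, 1))
				return;		/* got the lock */
			if (!spin) {
				sched_yield();	/* stand-in for a sleepqueue */
				continue;
			}
			/* Wait with plain loads until the lock looks free. */
			while (atomic_load_explicit(&lk->locked,
			    memory_order_relaxed) != 0)
				;		/* real code would back off here */
		}
	}

	static void
	toy_lock_release(struct toy_lock *lk)
	{
		atomic_store_explicit(&lk->locked, 0, memory_order_release);
	}

	int
	main(void)
	{
		struct toy_lock lk = { .flags = 0 };

		atomic_init(&lk.locked, 0);
		toy_lock_acquire(&lk);
		toy_lock_release(&lk);
		return (0);
	}

Caching the flag in a local is safe only because the flags word is
immutable while the lock is contested; that same invariant is what lets
the commit read SX_NOADAPTIVE once, before the loop, instead of on
every pass.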