Date: Wed, 5 Dec 2018 16:43:04 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r341593 - in head/sys: contrib/vchiq/interface/compat kern sys
Message-ID: <201812051643.wB5Gh45m078405@repo.freebsd.org>
Author: mjg
Date: Wed Dec  5 16:43:03 2018
New Revision: 341593
URL: https://svnweb.freebsd.org/changeset/base/341593

Log:
  sx: retire SX_NOADAPTIVE

  The flag has not been used by anything for years, and supporting it
  requires an explicit read from the lock when entering the slow path.

  The flag value is left unused on purpose.

  Sponsored by:	The FreeBSD Foundation

Modified:
  head/sys/contrib/vchiq/interface/compat/vchi_bsd.h
  head/sys/kern/kern_sx.c
  head/sys/sys/sx.h

Modified: head/sys/contrib/vchiq/interface/compat/vchi_bsd.h
==============================================================================
--- head/sys/contrib/vchiq/interface/compat/vchi_bsd.h	Wed Dec  5 15:56:44 2018	(r341592)
+++ head/sys/contrib/vchiq/interface/compat/vchi_bsd.h	Wed Dec  5 16:43:03 2018	(r341593)
@@ -162,10 +162,6 @@ struct mutex {
  */
 typedef struct rwlock rwlock_t;
 
-#if defined(SX_ADAPTIVESPIN) && !defined(SX_NOADAPTIVE)
-#define SX_NOADAPTIVE SX_ADAPTIVESPIN
-#endif
-
 #define DEFINE_RWLOCK(name) \
 	struct rwlock name; \
 	SX_SYSINIT(name, &name, #name)

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Wed Dec  5 15:56:44 2018	(r341592)
+++ head/sys/kern/kern_sx.c	Wed Dec  5 16:43:03 2018	(r341593)
@@ -71,8 +71,6 @@ __FBSDID("$FreeBSD$");
 #define	ADAPTIVE_SX
 #endif
 
-CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
-
 #ifdef HWPMC_HOOKS
 #include <sys/pmckern.h>
 PMC_SOFT_DECLARE( , , lock, failed);
@@ -233,7 +231,7 @@ sx_init_flags(struct sx *sx, const char *description,
 	int flags;
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
-	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
+	    SX_NOPROFILE | SX_NEW)) == 0);
 	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
 	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
 	    &sx->sx_lock));
@@ -252,7 +250,6 @@ sx_init_flags(struct sx *sx, const char *description,
 	if (opts & SX_NEW)
 		flags |= LO_NEW;
 
-	flags |= opts & SX_NOADAPTIVE;
 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
 	sx->sx_lock = SX_LOCK_UNLOCKED;
 	sx->sx_recurse = 0;
@@ -572,7 +569,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
 	enum { READERS, WRITER } sleep_reason = READERS;
-	bool adaptive;
 	bool in_critical = false;
 #endif
 #ifdef LOCK_PROFILING
@@ -642,10 +638,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
 
-#ifdef ADAPTIVE_SX
-	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
-#endif
-
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -669,8 +661,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
-		if (__predict_false(!adaptive))
-			goto sleepq;
 		/*
 		 * If the lock is write locked and the owner is
 		 * running on another CPU, spin until the owner stops
@@ -762,20 +752,18 @@ retry_sleepq:
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
-		if (adaptive) {
-			if (!(x & SX_LOCK_SHARED)) {
-				owner = (struct thread *)SX_OWNER(x);
-				if (TD_IS_RUNNING(owner)) {
-					sleepq_release(&sx->lock_object);
-					sx_drop_critical(x, &in_critical,
-					    &extra_work);
-					continue;
-				}
-			} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
+		if (!(x & SX_LOCK_SHARED)) {
+			owner = (struct thread *)SX_OWNER(x);
+			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
-				sx_drop_critical(x, &in_critical, &extra_work);
+				sx_drop_critical(x, &in_critical,
+				    &extra_work);
 				continue;
 			}
+		} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
+			sleepq_release(&sx->lock_object);
+			sx_drop_critical(x, &in_critical, &extra_work);
+			continue;
 		}
 #endif
@@ -1021,7 +1009,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
-	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -1066,10 +1053,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 	lock_delay_arg_init(&lda, NULL);
 #endif
 
-#ifdef ADAPTIVE_SX
-	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
-#endif
-
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -1095,9 +1078,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 #endif
 
 #ifdef ADAPTIVE_SX
-		if (__predict_false(!adaptive))
-			goto sleepq;
-
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
@@ -1154,7 +1134,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
 			continue;
 		}
 	}
-sleepq:
 #endif
 
 	/*
@@ -1176,7 +1155,7 @@ retry_sleepq:
		 * the owner stops running or the state of the lock
		 * changes.
		 */
-		if (!(x & SX_LOCK_SHARED) && adaptive) {
+		if (!(x & SX_LOCK_SHARED)) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);

Modified: head/sys/sys/sx.h
==============================================================================
--- head/sys/sys/sx.h	Wed Dec  5 15:56:44 2018	(r341592)
+++ head/sys/sys/sx.h	Wed Dec  5 16:43:03 2018	(r341593)
@@ -273,7 +273,6 @@ __sx_xunlock(struct sx *sx, struct thread *td, const c
 #define	SX_NOPROFILE		0x02
 #define	SX_NOWITNESS		0x04
 #define	SX_QUIET		0x08
-#define	SX_NOADAPTIVE		0x10
 #define	SX_RECURSE		0x20
 #define	SX_NEW			0x40
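
As an aside for readers following the diff, the cost the log message refers
to is the per-lock flag load that used to gate adaptive spinning.  Below is
a minimal userspace sketch, not kernel code: toy_sx, TOY_NOADAPTIVE and
owner_running() are hypothetical stand-ins for the sx(9) internals touched
here.  It only illustrates that the old slow path had to read lo_flags on
every contested acquisition before deciding whether to spin, while the new
one spins unconditionally on kernels built with ADAPTIVE_SX.

	/*
	 * Toy model of the retired pattern.  NOT FreeBSD kernel code.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define	TOY_NOADAPTIVE	0x10	/* per-lock opt-out, as SX_NOADAPTIVE was */

	struct toy_sx {
		uintptr_t	lock_word;	/* stand-in for sx_lock */
		int		lo_flags;	/* stand-in for lock_object.lo_flags */
	};

	static bool
	owner_running(uintptr_t x)
	{
		(void)x;
		return (true);		/* pretend the owner is on another CPU */
	}

	/*
	 * Before r341593: every trip into the contested slow path loaded
	 * lo_flags before deciding whether adaptive spinning is allowed.
	 */
	static void
	slowpath_old(struct toy_sx *sx, uintptr_t x)
	{
		bool adaptive;

		adaptive = ((sx->lo_flags & TOY_NOADAPTIVE) == 0);	/* extra read */
		if (adaptive && owner_running(x))
			puts("spin until the owner goes off CPU");
		else
			puts("go straight to the sleep queue");
	}

	/*
	 * After r341593: ADAPTIVE_SX kernels always spin; the per-lock
	 * read is gone.
	 */
	static void
	slowpath_new(struct toy_sx *sx, uintptr_t x)
	{
		(void)sx;
		if (owner_running(x))
			puts("spin until the owner goes off CPU");
		else
			puts("block on the sleep queue");
	}

	int
	main(void)
	{
		struct toy_sx sx = { .lock_word = 1, .lo_flags = 0 };

		slowpath_old(&sx, sx.lock_word);
		slowpath_new(&sx, sx.lock_word);
		return (0);
	}

Built with cc, both variants print the same decision here; the point is
only the extra load in slowpath_old(), which the commit eliminates from
the real _sx_xlock_hard() and _sx_slock_hard().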