From owner-svn-src-all@freebsd.org Sun Feb 5 04:54:22 2017
From: Mateusz Guzik <mjg@FreeBSD.org>
Date: Sun, 5 Feb 2017 04:54:20 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r313271 - in head/sys: kern sys
Message-Id: <201702050454.v154sKN3072682@repo.freebsd.org>

Author: mjg
Date: Sun Feb 5 04:54:20 2017
New Revision: 313271
URL: https://svnweb.freebsd.org/changeset/base/313271

Log:
  sx: switch to fcmpset

  Discussed with:	jhb
  Tested by:	pho (previous version)

Modified:
  head/sys/kern/kern_sx.c
  head/sys/sys/sx.h

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Sun Feb 5 04:53:13 2017	(r313270)
+++ head/sys/kern/kern_sx.c	Sun Feb 5 04:54:20 2017	(r313271)
@@ -530,15 +530,14 @@ sx_downgrade_(struct sx *sx, const char
  * accessible from at least sx.h.
  */
 int
-_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
-    int line)
+_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
+    const char *file, int line)
 {
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, spintries = 0;
 #endif
-	uintptr_t x;
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -563,8 +562,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
 	lock_delay_arg_init(&lda, NULL);
 #endif
 
-	x = SX_READ_VALUE(sx);
-
 	/* If we already hold an exclusive lock, then recurse. */
 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -587,9 +584,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
 #endif
 	for (;;) {
 		if (x == SX_LOCK_UNLOCKED) {
-			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, tid))
+			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
 				break;
-			x = SX_READ_VALUE(sx);
 			continue;
 		}
 #ifdef KDTRACE_HOOKS
@@ -902,7 +898,7 @@ _sx_slock_hard(struct sx *sx, int opts,
 		 */
 		if (x & SX_LOCK_SHARED) {
 			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
-			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
+			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
 			    x + SX_ONE_SHARER)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -911,7 +907,6 @@ _sx_slock_hard(struct sx *sx, int opts,
 					    (void *)(x + SX_ONE_SHARER));
 				break;
 			}
-			x = SX_READ_VALUE(sx);
 			continue;
 		}
 #ifdef KDTRACE_HOOKS
@@ -1085,7 +1080,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
 		 * so, just drop one and return.
 		 */
 		if (SX_SHARERS(x) > 1) {
-			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
+			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, &x,
 			    x - SX_ONE_SHARER)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -1094,8 +1089,6 @@ _sx_sunlock_hard(struct sx *sx, const ch
 					    (void *)(x - SX_ONE_SHARER));
 				break;
 			}
-
-			x = SX_READ_VALUE(sx);
 			continue;
 		}
 
@@ -1105,14 +1098,14 @@ _sx_sunlock_hard(struct sx *sx, const ch
 		 */
 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
 			MPASS(x == SX_SHARERS_LOCK(1));
-			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
-			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
+			x = SX_SHARERS_LOCK(1);
+			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
+			    &x, SX_LOCK_UNLOCKED)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR2(KTR_LOCK, "%s: %p last succeeded",
 					    __func__, sx);
 				break;
 			}
-			x = SX_READ_VALUE(sx);
 			continue;
 		}

Modified: head/sys/sys/sx.h
==============================================================================
--- head/sys/sys/sx.h	Sun Feb 5 04:53:13 2017	(r313270)
+++ head/sys/sys/sx.h	Sun Feb 5 04:54:20 2017	(r313271)
@@ -109,7 +109,7 @@ int	_sx_slock(struct sx *sx, int opts, c
 int	_sx_xlock(struct sx *sx, int opts, const char *file, int line);
 void	_sx_sunlock(struct sx *sx, const char *file, int line);
 void	_sx_xunlock(struct sx *sx, const char *file, int line);
-int	_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
+int	_sx_xlock_hard(struct sx *sx, uintptr_t v, uintptr_t tid, int opts,
 	    const char *file, int line);
 int	_sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
 void	_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
@@ -153,11 +153,12 @@ __sx_xlock(struct sx *sx, struct thread
     int line)
 {
 	uintptr_t tid = (uintptr_t)td;
+	uintptr_t v;
 	int error = 0;
 
-	if (sx->sx_lock != SX_LOCK_UNLOCKED ||
-	    !atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
-		error = _sx_xlock_hard(sx, tid, opts, file, line);
+	v = SX_LOCK_UNLOCKED;
+	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid))
+		error = _sx_xlock_hard(sx, v, tid, opts, file, line);
 	else
 		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0,
 		    file, line, LOCKSTAT_WRITER);
@@ -174,8 +175,7 @@ __sx_xunlock(struct sx *sx, struct threa
 	if (sx->sx_recurse == 0)
 		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
 		    LOCKSTAT_WRITER);
-	if (sx->sx_lock != tid ||
-	    !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
 		_sx_xunlock_hard(sx, tid, file, line);
 }
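
The diff replaces atomic_cmpset_*() with atomic_fcmpset_*() in the sx slow paths. atomic_cmpset() only reports success or failure, so every failed attempt had to re-read the lock word with SX_READ_VALUE() before retrying; atomic_fcmpset() takes a pointer to the expected value and, on failure, stores the value it actually observed there, so the retry loops can drop the extra read and __sx_xlock() can hand the observed value straight to _sx_xlock_hard(). The sketch below contrasts the two loop shapes using C11 atomics as a userland stand-in for the kernel atomic(9) primitives; it is illustrative only, and the names lock_cmpset_style(), lock_fcmpset_style() and LOCK_UNLOCKED are invented for this example rather than taken from the kernel sources.

/*
 * Illustrative userland sketch (not kernel code): C11 atomics standing in
 * for atomic_cmpset_acq_ptr()/atomic_fcmpset_acq_ptr().
 */
#include <stdatomic.h>
#include <stdint.h>

#define	LOCK_UNLOCKED	((uintptr_t)0)	/* invented stand-in for SX_LOCK_UNLOCKED */

/*
 * cmpset-style loop: mirrors the old kernel code, which got only a yes/no
 * answer from atomic_cmpset() and therefore re-read the lock word after
 * every failure (the updated 'expected' is simply discarded here).
 */
static void
lock_cmpset_style(_Atomic uintptr_t *lk, uintptr_t tid)
{
	uintptr_t v = atomic_load(lk);

	for (;;) {
		if (v == LOCK_UNLOCKED) {
			uintptr_t expected = LOCK_UNLOCKED;

			if (atomic_compare_exchange_strong(lk, &expected, tid))
				break;
			v = atomic_load(lk);	/* extra read on every failure */
			continue;
		}
		/* Lock is held: a real implementation would spin or sleep here. */
		v = atomic_load(lk);
	}
}

/*
 * fcmpset-style loop: on failure the CAS writes the value it observed back
 * into 'v', so no separate re-read is needed before retrying.
 */
static void
lock_fcmpset_style(_Atomic uintptr_t *lk, uintptr_t tid)
{
	uintptr_t v = atomic_load(lk);

	for (;;) {
		if (v == LOCK_UNLOCKED) {
			if (atomic_compare_exchange_weak(lk, &v, tid))
				break;
			continue;	/* 'v' already holds the current value */
		}
		/* Lock is held: a real implementation would spin or sleep here. */
		v = atomic_load(lk);
	}
}

int
main(void)
{
	_Atomic uintptr_t lk1 = LOCK_UNLOCKED, lk2 = LOCK_UNLOCKED;

	lock_cmpset_style(&lk1, (uintptr_t)1);	/* pretend thread ids */
	lock_fcmpset_style(&lk2, (uintptr_t)2);
	return (lk1 == 1 && lk2 == 2 ? 0 : 1);
}

C11's compare_exchange updates *expected on failure much like fcmpset, which is why the second loop needs no separate load; on a contended path that saves one access to the lock word per retry.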