From owner-p4-projects@FreeBSD.ORG  Fri Jul 6 13:47:09 2007
Return-Path: 
X-Original-To: p4-projects@freebsd.org
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id 731C616A469; Fri, 6 Jul 2007 13:47:09 +0000 (UTC)
X-Original-To: perforce@FreeBSD.org
Delivered-To: perforce@FreeBSD.org
Received: from mx1.freebsd.org (mx1.freebsd.org [69.147.83.52])
	by hub.freebsd.org (Postfix) with ESMTP id 3FAAB16A400
	for ; Fri, 6 Jul 2007 13:47:09 +0000 (UTC)
	(envelope-from rdivacky@FreeBSD.org)
Received: from repoman.freebsd.org (repoman.freebsd.org [69.147.83.41])
	by mx1.freebsd.org (Postfix) with ESMTP id 2F53F13C44B
	for ; Fri, 6 Jul 2007 13:47:09 +0000 (UTC)
	(envelope-from rdivacky@FreeBSD.org)
Received: from repoman.freebsd.org (localhost [127.0.0.1])
	by repoman.freebsd.org (8.13.8/8.13.8) with ESMTP id l66Dl9BA008237
	for ; Fri, 6 Jul 2007 13:47:09 GMT
	(envelope-from rdivacky@FreeBSD.org)
Received: (from perforce@localhost)
	by repoman.freebsd.org (8.13.8/8.13.8/Submit) id l66Dl8E5008233
	for perforce@freebsd.org; Fri, 6 Jul 2007 13:47:08 GMT
	(envelope-from rdivacky@FreeBSD.org)
Date: Fri, 6 Jul 2007 13:47:08 GMT
Message-Id: <200707061347.l66Dl8E5008233@repoman.freebsd.org>
X-Authentication-Warning: repoman.freebsd.org: perforce set sender to
	rdivacky@FreeBSD.org using -f
From: Roman Divacky 
To: Perforce Change Reviews 
Cc: 
Subject: PERFORCE change 123016 for review
X-BeenThere: p4-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: p4 projects tree changes 
List-Unsubscribe: , 
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: , 
X-List-Received-Date: Fri, 06 Jul 2007 13:47:09 -0000

http://perforce.freebsd.org/chv.cgi?CH=123016

Change 123016 by rdivacky@rdivacky_witten on 2007/07/06 13:46:23

	IFC

Affected files ...

.. //depot/projects/soc2007/rdivacky/linux_at/lib/libc/gen/Symbol.map#1 branch
.. //depot/projects/soc2007/rdivacky/linux_at/sys/conf/options#7 integrate
.. //depot/projects/soc2007/rdivacky/linux_at/sys/kern/kern_sx.c#3 integrate
.. //depot/projects/soc2007/rdivacky/linux_at/sys/sys/sx.h#3 integrate

Differences ...

==== //depot/projects/soc2007/rdivacky/linux_at/sys/conf/options#7 (text+ko) ====

@@ -1,4 +1,4 @@
-# $FreeBSD: src/sys/conf/options,v 1.599 2007/07/04 00:18:38 bz Exp $
+# $FreeBSD: src/sys/conf/options,v 1.600 2007/07/06 13:20:43 attilio Exp $
 #
 # On the handling of kernel options
 #
@@ -538,6 +538,7 @@
 MUTEX_NOINLINE		opt_global.h
 LOCK_PROFILING		opt_global.h
 LOCK_PROFILING_FAST	opt_global.h
+LOCK_PROFILING_SHARED	opt_global.h
 MSIZE			opt_global.h
 REGRESSION		opt_global.h
 RESTARTABLE_PANICS	opt_global.h

==== //depot/projects/soc2007/rdivacky/linux_at/sys/kern/kern_sx.c#3 (text+ko) ====

@@ -40,7 +40,7 @@
 #include "opt_ddb.h"
 
 #include 
-__FBSDID("$FreeBSD: src/sys/kern/kern_sx.c,v 1.53 2007/05/31 09:14:47 attilio Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_sx.c,v 1.54 2007/07/06 13:20:44 attilio Exp $");
 
 #include 
 #include 
@@ -63,6 +63,9 @@
 #error "You must have SMP to enable the ADAPTIVE_SX option"
 #endif
 
+CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
+    (SX_ADAPTIVESPIN | SX_RECURSE));
+
 /* Handy macros for sleep queues. */
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
@@ -287,8 +290,10 @@
 	curthread->td_locks--;
 	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
 	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
+#ifdef LOCK_PROFILING_SHARED
 	if (SX_SHARERS(sx->sx_lock) == 1)
 		lock_profile_release_lock(&sx->lock_object);
+#endif
 	__sx_sunlock(sx, file, line);
 }
 
@@ -412,23 +417,21 @@
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 #endif
+	uint64_t waittime = 0;
 	uintptr_t x;
 	int contested = 0, error = 0;
-	uint64_t waitstart = 0;
 
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
 		KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
 	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
-		    sx->lock_object.lo_name, file, line));
+	    sx->lock_object.lo_name, file, line));
 		sx->sx_recurse++;
 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
 		return (0);
 	}
-	lock_profile_obtain_lock_failed(&(sx)->lock_object,
-	    &contested, &waitstart);
 
 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
@@ -452,6 +455,8 @@
 					    "%s: spinning on %p held by %p",
 					    __func__, sx, owner);
 				GIANT_SAVE();
+				lock_profile_obtain_lock_failed(
+				    &sx->lock_object, &contested, &waittime);
 				while (SX_OWNER(sx->sx_lock) == x &&
 				    TD_IS_RUNNING(owner))
 					cpu_spinwait();
@@ -538,6 +543,8 @@
 			    __func__, sx);
 
 		GIANT_SAVE();
+		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+		    &waittime);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -560,8 +567,8 @@
 
 	GIANT_RESTORE();
 	if (!error)
-		lock_profile_obtain_lock_success(&(sx)->lock_object, contested,
-		    waitstart, file, line);
+		lock_profile_obtain_lock_success(&sx->lock_object, contested,
+		    waittime, file, line);
 	return (error);
 }
 
@@ -629,14 +636,17 @@
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 #endif
+#ifdef LOCK_PROFILING_SHARED
+	uint64_t waittime = 0;
+	int contested = 0;
+#endif
 	uintptr_t x;
-	uint64_t waitstart = 0;
-	int contested = 0, error = 0;
+	int error = 0;
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
 	 */
-
 	for (;;) {
 		x = sx->sx_lock;
 
@@ -650,10 +660,12 @@
 			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
 			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
 			    x + SX_ONE_SHARER)) {
+#ifdef LOCK_PROFILING_SHARED
 				if (SX_SHARERS(x) == 0)
 					lock_profile_obtain_lock_success(
 					    &sx->lock_object, contested,
-					    waitstart, file, line);
+					    waittime, file, line);
+#endif
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: %p succeed %p -> %p", __func__,
@@ -661,9 +673,6 @@
 					    (void *)(x + SX_ONE_SHARER));
 				break;
 			}
-			lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-			    &waitstart);
-
 			continue;
 		}
 
@@ -677,23 +686,22 @@
 		x = SX_OWNER(x);
 		owner = (struct thread *)x;
 		if (TD_IS_RUNNING(owner)) {
-			lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-			    &waitstart);
 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, sx, owner);
 			GIANT_SAVE();
+#ifdef LOCK_PROFILING_SHARED
+			lock_profile_obtain_lock_failed(
+			    &sx->lock_object, &contested, &waittime);
+#endif
 			while (SX_OWNER(sx->sx_lock) == x &&
 			    TD_IS_RUNNING(owner))
 				cpu_spinwait();
 			continue;
 		}
-	}
+		}
 #endif
-		else
-			lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-			    &waitstart);
 
 		/*
 		 * Some other thread already has an exclusive lock, so
@@ -750,8 +758,12 @@
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
 			    __func__, sx);
-	
+
 		GIANT_SAVE();
+#ifdef LOCK_PROFILING_SHARED
+		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+		    &waittime);
+#endif
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
@@ -822,7 +834,6 @@
 		MPASS(x == SX_SHARERS_LOCK(1));
 		if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
 		    SX_LOCK_UNLOCKED)) {
-			lock_profile_release_lock(&sx->lock_object);
 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
 				CTR2(KTR_LOCK, "%s: %p last succeeded",
 				    __func__, sx);
@@ -837,7 +848,6 @@
 	 */
 	MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
 
-	lock_profile_release_lock(&sx->lock_object);
 	sleepq_lock(&sx->lock_object);
 
 	/*

==== //depot/projects/soc2007/rdivacky/linux_at/sys/sys/sx.h#3 (text+ko) ====

@@ -26,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  * DAMAGE.
  *
- * $FreeBSD: src/sys/sys/sx.h,v 1.36 2007/05/31 09:14:47 attilio Exp $
+ * $FreeBSD: src/sys/sys/sx.h,v 1.37 2007/07/06 13:20:44 attilio Exp $
 */
 
 #ifndef	_SYS_SX_H_
@@ -178,9 +178,11 @@
 	if (!(x & SX_LOCK_SHARED) ||
 	    !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
 		error = _sx_slock_hard(sx, opts, file, line);
-	else
+#ifdef LOCK_PROFILING_SHARED
+	else if (SX_SHARERS(x) == 0)
 		lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file,
 		    line);
+#endif
 
 	return (error);
 }
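
A minimal stand-alone sketch of the conditional-compilation pattern the kern_sx.c
hunks above follow. The names here (acquire_shared, nsharers, record_sample) are
hypothetical and not part of change 123016: per-acquire profiling state and the
profiling calls are compiled in only when LOCK_PROFILING_SHARED (the new entry in
sys/conf/options) is defined, so the default shared-lock paths carry none of the
bookkeeping; presumably the option is meant to be used together with LOCK_PROFILING.

/*
 * Illustrative sketch only -- not part of change 123016.
 */
#include <stdint.h>
#include <stdio.h>

/* #define LOCK_PROFILING_SHARED */	/* would normally come from opt_global.h */

#ifdef LOCK_PROFILING_SHARED
static void
record_sample(int contested, uint64_t waittime)
{
	/* Stand-in for lock_profile_obtain_lock_success(). */
	printf("shared acquire: contested=%d waittime=%ju\n",
	    contested, (uintmax_t)waittime);
}
#endif

static void
acquire_shared(int nsharers)
{
#ifdef LOCK_PROFILING_SHARED
	uint64_t waittime = 0;
	int contested = 0;
#endif

	/* ... take the shared lock here ... */

#ifdef LOCK_PROFILING_SHARED
	/* Only the first sharer records a sample, as in _sx_slock_hard(). */
	if (nsharers == 0)
		record_sample(contested, waittime);
#else
	(void)nsharers;		/* profiling compiled out */
#endif
}

int
main(void)
{
	acquire_shared(0);
	return (0);
}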