From owner-svn-src-user@FreeBSD.ORG Mon May 18 18:34:03 2009
Message-Id: <200905181834.n4IIY3Jp071075@svn.freebsd.org>
From: Kip Macy <kmacy@FreeBSD.org>
Date: Mon, 18 May 2009 18:34:03 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r192322 - in user/kmacy/releng_7_2_fcs/sys: kern sys

Author: kmacy
Date: Mon May 18 18:34:03 2009
New Revision: 192322

URL: http://svn.freebsd.org/changeset/base/192322

Log:
  Merge dependent callout changes 171053 172025 172184 173760 173842 176013

Modified:
  user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c
  user/kmacy/releng_7_2_fcs/sys/sys/callout.h

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c	Mon May 18 18:12:45 2009	(r192321)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c	Mon May 18 18:34:03 2009	(r192322)
@@ -55,9 +55,9 @@ SYSCTL_INT(_debug, OID_AUTO, to_avg_dept
 static int avg_gcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
     "Average number of Giant callouts made per softclock call. Units = 1/1000");
-static int avg_mtxcalls;
-SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
-    "Average number of mtx callouts made per softclock call. Units = 1/1000");
+static int avg_lockcalls;
+SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
+    "Average number of lock callouts made per softclock call. Units = 1/1000");
 static int avg_mpcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
     "Average number of MP callouts made per softclock call. Units = 1/1000");
@@ -82,12 +82,12 @@ static struct callout *nextsoftcheck;	/*
  *                     If curr_callout is non-NULL, threads waiting in
  *                     callout_drain() will be woken up as soon as the
  *                     relevant callout completes.
- *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
+ *   curr_cancelled  - Changing to 1 with both callout_lock and c_lock held
  *                     guarantees that the current callout will not run.
  *                     The softclock() function sets this to 0 before it
- *                     drops callout_lock to acquire c_mtx, and it calls
+ *                     drops callout_lock to acquire c_lock, and it calls
  *                     the handler only if curr_cancelled is still 0 after
- *                     c_mtx is successfully acquired.
+ *                     c_lock is successfully acquired.
  *   callout_wait    - If a thread is waiting in callout_drain(), then
  *                     callout_wait is nonzero.  Set only when
  *                     curr_callout is non-NULL.
@@ -170,7 +170,7 @@ softclock(void *dummy)
 	int steps;	/* #steps since we last allowed interrupts */
 	int depth;
 	int mpcalls;
-	int mtxcalls;
+	int lockcalls;
 	int gcalls;
 #ifdef DIAGNOSTIC
 	struct bintime bt1, bt2;
@@ -184,7 +184,7 @@ softclock(void *dummy)
 #endif	/* MAX_SOFTCLOCK_STEPS */
 
 	mpcalls = 0;
-	mtxcalls = 0;
+	lockcalls = 0;
 	gcalls = 0;
 	depth = 0;
 	steps = 0;
@@ -215,14 +215,19 @@ softclock(void *dummy)
 			} else {
 				void (*c_func)(void *);
 				void *c_arg;
-				struct mtx *c_mtx;
-				int c_flags;
+				struct lock_class *class;
+				struct lock_object *c_lock;
+				int c_flags, sharedlock;
 
 				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
 				TAILQ_REMOVE(bucket, c, c_links.tqe);
+				class = (c->c_lock != NULL) ?
+				    LOCK_CLASS(c->c_lock) : NULL;
+				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
+				    0 : 1;
+				c_lock = c->c_lock;
 				c_func = c->c_func;
 				c_arg = c->c_arg;
-				c_mtx = c->c_mtx;
 				c_flags = c->c_flags;
 				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 					c->c_func = NULL;
@@ -237,27 +242,27 @@ softclock(void *dummy)
 				}
 				curr_cancelled = 0;
 				mtx_unlock_spin(&callout_lock);
-				if (c_mtx != NULL) {
-					mtx_lock(c_mtx);
+				if (c_lock != NULL) {
+					class->lc_lock(c_lock, sharedlock);
 					/*
 					 * The callout may have been cancelled
 					 * while we switched locks.
 					 */
 					if (curr_cancelled) {
-						mtx_unlock(c_mtx);
+						class->lc_unlock(c_lock);
 						goto skip;
 					}
 					/* The callout cannot be stopped now. */
 					curr_cancelled = 1;
 
-					if (c_mtx == &Giant) {
+					if (c_lock == &Giant.lock_object) {
 						gcalls++;
 						CTR3(KTR_CALLOUT,
 						    "callout %p func %p arg %p",
 						    c, c_func, c_arg);
 					} else {
-						mtxcalls++;
-						CTR3(KTR_CALLOUT, "callout mtx"
+						lockcalls++;
+						CTR3(KTR_CALLOUT, "callout lock"
 						    " %p func %p arg %p",
 						    c, c_func, c_arg);
 					}
@@ -292,7 +297,7 @@ softclock(void *dummy)
 #endif
 				CTR1(KTR_CALLOUT, "callout %p finished", c);
 				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
-					mtx_unlock(c_mtx);
+					class->lc_unlock(c_lock);
 			skip:
 				mtx_lock_spin(&callout_lock);
 				curr_callout = NULL;
@@ -313,7 +318,7 @@ softclock(void *dummy)
 	}
 	avg_depth += (depth * 1000 - avg_depth) >> 8;
 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
-	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
+	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
 	nextsoftcheck = NULL;
 	mtx_unlock_spin(&callout_lock);
@@ -412,19 +417,14 @@ callout_reset(c, to_ticks, ftn, arg)
 {
 	int cancelled = 0;
 
-#ifdef notyet	/* Some callers of timeout() do not hold Giant. */
-	if (c->c_mtx != NULL)
-		mtx_assert(c->c_mtx, MA_OWNED);
-#endif
-
 	mtx_lock_spin(&callout_lock);
 	if (c == curr_callout) {
 		/*
 		 * We're being asked to reschedule a callout which is
-		 * currently in progress.  If there is a mutex then we
+		 * currently in progress.  If there is a lock then we
 		 * can cancel the callout if it has not really started.
 		 */
-		if (c->c_mtx != NULL && !curr_cancelled)
+		if (c->c_lock != NULL && !curr_cancelled)
 			cancelled = curr_cancelled = 1;
 		if (callout_wait) {
 			/*
@@ -483,18 +483,23 @@ _callout_stop_safe(c, safe)
 	struct	callout *c;
 	int	safe;
 {
-	int use_mtx, sq_locked;
+	struct lock_class *class;
+	int use_lock, sq_locked;
 
-	if (!safe && c->c_mtx != NULL) {
-#ifdef notyet	/* Some callers do not hold Giant for Giant-locked callouts. */
-		mtx_assert(c->c_mtx, MA_OWNED);
-		use_mtx = 1;
-#else
-		use_mtx = mtx_owned(c->c_mtx);
-#endif
-	} else {
-		use_mtx = 0;
-	}
+	/*
+	 * Some old subsystems don't hold Giant while running a callout_stop(),
+	 * so just discard this check for the moment.
+	 */
+	if (!safe && c->c_lock != NULL) {
+		if (c->c_lock == &Giant.lock_object)
+			use_lock = mtx_owned(&Giant);
+		else {
+			use_lock = 1;
+			class = LOCK_CLASS(c->c_lock);
+			class->lc_assert(c->c_lock, LA_XLOCKED);
+		}
+	} else
+		use_lock = 0;
 
 	sq_locked = 0;
 again:
@@ -566,12 +571,12 @@ again:
 				PICKUP_GIANT();
 				mtx_lock_spin(&callout_lock);
 			}
-		} else if (use_mtx && !curr_cancelled) {
+		} else if (use_lock && !curr_cancelled) {
 			/*
-			 * The current callout is waiting for it's
-			 * mutex which we hold.  Cancel the callout
+			 * The current callout is waiting for its
+			 * lock which we hold.  Cancel the callout
 			 * and return.  After our caller drops the
-			 * mutex, the callout will be skipped in
+			 * lock, the callout will be skipped in
 			 * softclock().
 			 */
 			curr_cancelled = 1;
@@ -615,28 +620,30 @@ callout_init(c, mpsafe)
 {
 	bzero(c, sizeof *c);
 	if (mpsafe) {
-		c->c_mtx = NULL;
+		c->c_lock = NULL;
 		c->c_flags = CALLOUT_RETURNUNLOCKED;
 	} else {
-		c->c_mtx = &Giant;
+		c->c_lock = &Giant.lock_object;
 		c->c_flags = 0;
 	}
 }
 
 void
-callout_init_mtx(c, mtx, flags)
+_callout_init_lock(c, lock, flags)
 	struct	callout *c;
-	struct	mtx *mtx;
+	struct	lock_object *lock;
 	int	flags;
 {
 	bzero(c, sizeof *c);
-	c->c_mtx = mtx;
-	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED)) == 0,
-	    ("callout_init_mtx: bad flags %d", flags));
-	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
-	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
-	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
-	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED);
+	c->c_lock = lock;
+	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
+	    ("callout_init_lock: bad flags %d", flags));
+	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
+	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
+	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
+	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
+	    __func__));
+	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 }
 
 #ifdef APM_FIXUP_CALLTODO

Modified: user/kmacy/releng_7_2_fcs/sys/sys/callout.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/callout.h	Mon May 18 18:12:45 2009	(r192321)
+++ user/kmacy/releng_7_2_fcs/sys/sys/callout.h	Mon May 18 18:34:03 2009	(r192322)
@@ -40,7 +40,7 @@
 
 #include <sys/queue.h>
 
-struct mtx;
+struct lock_object;
 
 SLIST_HEAD(callout_list, callout);
 TAILQ_HEAD(callout_tailq, callout);
@@ -53,7 +53,7 @@ struct callout {
 	int	c_time;				/* ticks to the event */
 	void	*c_arg;				/* function argument */
 	void	(*c_func)(void *);		/* function to call */
-	struct mtx *c_mtx;			/* mutex to lock */
+	struct lock_object *c_lock;		/* lock to handle */
 	int	c_flags;			/* state of this entry */
 };
 
@@ -62,6 +62,7 @@ struct callout {
 #define	CALLOUT_PENDING		0x0004 /* callout is waiting for timeout */
 #define	CALLOUT_MPSAFE		0x0008 /* callout handler is mp safe */
 #define	CALLOUT_RETURNUNLOCKED	0x0010 /* handler returns with mtx unlocked */
+#define	CALLOUT_SHAREDLOCK	0x0020 /* callout lock held in shared mode */
 
 struct callout_handle {
 	struct callout *callout;
@@ -79,7 +80,13 @@ extern struct mtx callout_lock;
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
 #define	callout_drain(c)	_callout_stop_safe(c, 1)
 void	callout_init(struct callout *, int);
-void	callout_init_mtx(struct callout *, struct mtx *, int);
+void	_callout_init_lock(struct callout *, struct lock_object *, int);
+#define	callout_init_mtx(c, mtx, flags)					\
+	_callout_init_lock((c), ((mtx) != NULL) ? &(mtx)->lock_object :	\
+	    NULL, (flags))
+#define	callout_init_rw(c, rw, flags)					\
+	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
+	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
 int	callout_reset(struct callout *, int, void (*)(void *), void *);
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
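
A usage sketch (not part of the commit): the user-visible effect of the
callout.h changes above is that a callout can now be driven by an rwlock
as well as a mutex, with CALLOUT_SHAREDLOCK asking softclock() to take
the lock in shared (read) mode around the handler.  The code below is a
hypothetical example of the new callout_init_rw() macro; foo_softc,
foo_tick, foo_attach, and the one-second period are made-up names chosen
only for illustration.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/lock.h>
	#include <sys/rwlock.h>
	#include <sys/callout.h>

	struct foo_softc {
		struct rwlock	sc_lock;
		struct callout	sc_tick;
	};

	/*
	 * Handler: softclock() acquires sc_lock before calling this and,
	 * because the callout is initialized with CALLOUT_SHAREDLOCK,
	 * takes it in shared (read) mode.
	 */
	static void
	foo_tick(void *arg)
	{
		struct foo_softc *sc = arg;

		rw_assert(&sc->sc_lock, RA_LOCKED);
		/* ... examine state protected by sc_lock ... */
		callout_reset(&sc->sc_tick, hz, foo_tick, sc);
		/* softclock() drops sc_lock when this returns, since
		   CALLOUT_RETURNUNLOCKED was not requested. */
	}

	static void
	foo_attach(struct foo_softc *sc)
	{
		rw_init(&sc->sc_lock, "foo softc");
		/* Tie the rwlock to the callout via the new macro. */
		callout_init_rw(&sc->sc_tick, &sc->sc_lock,
		    CALLOUT_SHAREDLOCK);
		/* Fire foo_tick() once per second (hz ticks). */
		callout_reset(&sc->sc_tick, hz, foo_tick, sc);
	}

Existing callers are unaffected: callout_init_mtx() is kept as a macro
over _callout_init_lock(), and the new KASSERT in _callout_init_lock()
rejects spin locks and sleepable locks, so only sleep mutexes and
rwlocks may be attached to a callout.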