Date: Wed, 5 Sep 2007 21:21:11 GMT
From: Marko Zec <zec@FreeBSD.org>
To: Perforce Change Reviews <perforce@FreeBSD.org>
Subject: PERFORCE change 126107 for review
Message-ID: <200709052121.l85LLBx2009532@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=126107

Change 126107 by zec@zec_tpx32 on 2007/09/05 21:20:46

	I've been shooting myself in the foot several times by calling
	callout_init() on already active callout handles.  This change
	examines the entire callwheel on each callout_init() invocation
	and panics if the handle is already linked in.

	This replaces a check for attempts at double-linking a callout
	handle in callout_reset(), which was much more expensive
	(callout_reset() is called far more frequently than
	callout_init()) and couldn't directly locate the offending
	callout_init() call.

	This extra check could probably be useful outside the scope of
	the network stack virtualization changes / code...

Affected files ...

.. //depot/projects/vimage/src/sys/kern/kern_timeout.c#5 edit

Differences ...

==== //depot/projects/vimage/src/sys/kern/kern_timeout.c#5 (text+ko) ====

@@ -37,8 +37,6 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/sys/kern/kern_timeout.c,v 1.104 2007/06/26 21:42:01 attilio Exp $");
 
-#include "opt_vimage.h"
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
@@ -75,6 +73,9 @@
 struct callout_tailq *callwheel;
 int	softticks;			/* Like ticks, but for softclock(). */
 struct mtx callout_lock;
+#ifdef INVARIANTS
+static int callwheel_initialized = 0;
+#endif
 
 static struct callout *nextsoftcheck;	/* Next callout to be checked. */
@@ -145,6 +146,9 @@
 		TAILQ_INIT(&callwheel[i]);
 	}
 	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
+#ifdef INVARIANTS
+	callwheel_initialized = 1;
+#endif
 }
 
 /*
@@ -479,23 +483,6 @@
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	c->c_func = ftn;
 	c->c_time = ticks + to_ticks;
-#if (defined(VIMAGE) && defined(INVARIANTS))
-	/*
-	 * MARKO XXX
-	 *
-	 * I'm suspecting that some lockups might have been caused by
-	 * a single callout handle being scheduled multiple times.
-	 * This loop examines the entire callwhell before inserting a
-	 * new handle, and if the handle is already linked in it panics.
-	 */
-	int callwheel_iter;
-	struct callout *c_iter;
-	for (callwheel_iter = 0; callwheel_iter <= callwheelmask;
-	    callwheel_iter++)
-		TAILQ_FOREACH(c_iter, &callwheel[callwheel_iter], c_links.tqe)
-			if (c_iter == c)
-				panic("finally got you!");
-#endif
 	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
 			  c, c_links.tqe);
 	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
@@ -621,11 +608,36 @@
 	return (1);
 }
 
+#ifdef INVARIANTS
+/*
+ * Examine the entire callwheel before initializing a new handle,
+ * and panic if the handle was already linked in.
+ */
+#define CALLWHEEL_CHECK(c)						\
+	if (callwheel_initialized) {					\
+		int callwheel_iter;					\
+		struct callout *c_iter;					\
+									\
+		mtx_lock_spin(&callout_lock);				\
+		for (callwheel_iter = 0; callwheel_iter <= callwheelmask; \
+		    callwheel_iter++)					\
+			TAILQ_FOREACH(c_iter, &callwheel[callwheel_iter], \
+			    c_links.tqe)				\
+				if (c_iter == c)			\
+					panic("%s() for active handle!", \
+					    __FUNCTION__);		\
+		mtx_unlock_spin(&callout_lock);				\
+	}
+#else
+#define CALLWHEEL_CHECK(c)
+#endif /* INVARIANTS */
+
 void
 callout_init(c, mpsafe)
 	struct callout *c;
 	int	mpsafe;
 {
+	CALLWHEEL_CHECK(c);
 	bzero(c, sizeof *c);
 	if (mpsafe) {
 		c->c_mtx = NULL;
@@ -642,6 +654,7 @@
 	struct mtx *mtx;
 	int flags;
 {
+	CALLWHEEL_CHECK(c);
 	bzero(c, sizeof *c);
 	c->c_mtx = mtx;
 	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT)) == 0,
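For illustration only (not part of the diff above), here is a minimal
sketch of the kind of misuse the new CALLWHEEL_CHECK() is meant to catch.
The foo_* names and the re-attach scenario are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static struct callout foo_ch;		/* hypothetical example handle */

static void
foo_tick(void *arg)
{

	/* ... do some periodic work, then reschedule ourselves. */
	callout_reset(&foo_ch, hz, foo_tick, arg);
}

static void
foo_start(void *arg)
{

	/*
	 * If foo_start() runs a second time (e.g. on a module or vimage
	 * re-attach) while the callout is still pending, this
	 * callout_init() bzero()s a handle that is still linked into
	 * the callwheel.  With INVARIANTS the new CALLWHEEL_CHECK()
	 * panics right here, naming callout_init() as the caller,
	 * instead of leaving a corrupted callwheel to lock up much
	 * later.
	 */
	callout_init(&foo_ch, CALLOUT_MPSAFE);
	callout_reset(&foo_ch, hz, foo_tick, arg);
}

Since the check is compiled only under INVARIANTS, it adds no cost to
production kernels.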