Date: Mon, 14 Dec 2009 12:23:47 +0000 (UTC)
From: Luigi Rizzo <luigi@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r200510 - head/sys/kern
Message-ID: <200912141223.nBECNlDZ026381@svn.freebsd.org>
Author: luigi
Date: Mon Dec 14 12:23:46 2009
New Revision: 200510
URL: http://svn.freebsd.org/changeset/base/200510

Log:
  Properly fix callout handling by putting all the per-cpu info in
  struct callout_cpu. From the comment in the file:

  + * There is one struct callout_cpu per cpu, holding all relevant
  + * state for the callout processing thread on the individual CPU.
  + * In particular:
  + *     cc_ticks is incremented once per tick in callout_cpu().
  + *     It tracks the global 'ticks' but in a way that the individual
  + *     threads should not worry about races in the order in which
  + *     hardclock() and hardclock_cpu() run on the various CPUs.
  + *     cc_softclock is advanced in callout_cpu() to point to the
  + *     first entry in cc_callwheel that may need handling. In turn,
  + *     a softclock() is scheduled so it can serve the various entries i
  + *     such that cc_softclock <= i <= cc_ticks .

  Together with a smaller patch committed in september, this fixes a
  bug that affects 8.0 with apps that rely on callouts to fire exactly
  in the number of ticks specified (qemu among them).
  Right now, callouts in 8.0 fire one tick late.

  This was discussed in september with JeffR and jhb

  MFC after:    3 days

Modified:
  head/sys/kern/kern_timeout.c

Modified: head/sys/kern/kern_timeout.c
==============================================================================
--- head/sys/kern/kern_timeout.c	Mon Dec 14 12:19:21 2009	(r200509)
+++ head/sys/kern/kern_timeout.c	Mon Dec 14 12:23:46 2009	(r200510)
@@ -82,6 +82,23 @@ SYSCTL_INT(_debug, OID_AUTO, to_avg_mpca
  */
 int callwheelsize, callwheelbits, callwheelmask;
 
+/*
+ * There is one struct callout_cpu per cpu, holding all relevant
+ * state for the callout processing thread on the individual CPU.
+ * In particular:
+ *	cc_ticks is incremented once per tick in callout_cpu().
+ *	It tracks the global 'ticks' but in a way that the individual
+ *	threads should not worry about races in the order in which
+ *	hardclock() and hardclock_cpu() run on the various CPUs.
+ *	cc_softclock is advanced in callout_cpu() to point to the
+ *	first entry in cc_callwheel that may need handling. In turn,
+ *	a softclock() is scheduled so it can serve the various entries i
+ *	such that cc_softclock <= i <= cc_ticks .
+ * XXX maybe cc_softclock and cc_ticks should be volatile ?
+ *
+ *	cc_ticks is also used in callout_reset_cpu() to determine
+ *	when the callout should be served.
+ */
 struct callout_cpu {
 	struct mtx		cc_lock;
 	struct callout		*cc_callout;
@@ -90,6 +107,7 @@ struct callout_cpu {
 	struct callout		*cc_next;
 	struct callout		*cc_curr;
 	void			*cc_cookie;
+	int			cc_ticks;
 	int			cc_softticks;
 	int			cc_cancel;
 	int			cc_waiting;
@@ -244,7 +262,8 @@ callout_tick(void)
 	need_softclock = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	for (; (cc->cc_softticks - ticks) <= 0; cc->cc_softticks++) {
+	cc->cc_ticks++;
+	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
 		bucket = cc->cc_softticks & callwheelmask;
 		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
 			need_softclock = 1;
@@ -323,7 +342,7 @@ softclock(void *arg)
 	steps = 0;
 	cc = (struct callout_cpu *)arg;
 	CC_LOCK(cc);
-	while (cc->cc_softticks - 1 != ticks) {
+	while (cc->cc_softticks - 1 != cc->cc_ticks) {
 		/*
 		 * cc_softticks may be modified by hard clock, so cache
 		 * it while we work on a given bucket.
@@ -622,7 +641,7 @@ retry:
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	c->c_func = ftn;
-	c->c_time = ticks + to_ticks;
+	c->c_time = cc->cc_ticks + to_ticks;
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
 	    c, c_links.tqe);
 	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
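
For readers following along outside the tree, here is a stand-alone sketch
of the fixed scheme. It is not FreeBSD code: the tiny wheel, the
singly-linked buckets and the fired() callback are invented for
illustration, and hardclock's callout_tick() plus softclock() are folded
into one function with no locking. What it demonstrates is the point of
the diff: once callout_reset() and the expiry scan both consult the same
per-cpu counter (cc_ticks), a callout asked to fire in to_ticks ticks runs
exactly to_ticks ticks later.

/*
 * Minimal user-space sketch of the fixed scheme (not FreeBSD code:
 * wheel size, singly-linked buckets and fired() are invented for
 * illustration; callout_tick()/softclock() are folded into one
 * unlocked function).
 */
#include <stdio.h>

#define CALLWHEELSIZE	16			/* must be a power of two */
#define CALLWHEELMASK	(CALLWHEELSIZE - 1)

struct callout {
	struct callout	*c_next;
	int		c_time;			/* absolute tick of expiry */
	void		(*c_func)(void *);
	void		*c_arg;
};

struct callout_cpu {
	int		cc_ticks;		/* per-cpu view of 'ticks' */
	int		cc_softticks;		/* next wheel slot to serve */
	struct callout	*cc_callwheel[CALLWHEELSIZE];
};

static void
callout_reset(struct callout_cpu *cc, struct callout *c, int to_ticks,
    void (*ftn)(void *), void *arg)
{
	int bucket;

	c->c_func = ftn;
	c->c_arg = arg;
	c->c_time = cc->cc_ticks + to_ticks;	/* the r200510 fix: cc_ticks, not 'ticks' */
	bucket = c->c_time & CALLWHEELMASK;
	c->c_next = cc->cc_callwheel[bucket];
	cc->cc_callwheel[bucket] = c;
}

/* One hardclock tick: advance cc_ticks, then run every due entry. */
static void
callout_tick(struct callout_cpu *cc)
{
	struct callout **pp, *c;
	int bucket;

	cc->cc_ticks++;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & CALLWHEELMASK;
		pp = &cc->cc_callwheel[bucket];
		while ((c = *pp) != NULL) {
			if (c->c_time == cc->cc_softticks) {
				*pp = c->c_next;	/* unlink, then run */
				c->c_func(c->c_arg);
			} else
				pp = &c->c_next;	/* wheel wrapped: not due yet */
		}
	}
}

static void
fired(void *arg)
{
	printf("fired at tick %d\n", *(int *)arg);
}

int
main(void)
{
	struct callout_cpu cc = { 0 };
	struct callout c;
	int t;

	callout_reset(&cc, &c, 3, fired, &cc.cc_ticks);
	for (t = 0; t < 5; t++)
		callout_tick(&cc);	/* prints "fired at tick 3": exactly 3 ticks later */
	return (0);
}

Roughly, with the pre-r200510 code the registration used the global
'ticks', which could already disagree by one with the per-cpu scan
depending on the order in which hardclock() and the per-cpu callout_tick()
ran, so the entry landed one slot further out and fired a tick late;
keeping both the registration and the scan on cc_ticks removes that skew.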