Date:      Thu, 10 May 2012 11:08:09 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r235225 - stable/8/sys/kern
Message-ID:  <201205101108.q4AB89ec033098@svn.freebsd.org>

Author: kib
Date: Thu May 10 11:08:09 2012
New Revision: 235225
URL: http://svn.freebsd.org/changeset/base/235225

Log:
  MFC r234981:
  Move the code that calls the callout callback into the helper function
  softclock_call_cc(). While there, move some common code into callout_cc_del().
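
  For readers skimming the diff below, the shape of the change is roughly this:
  the per-callout work is pulled out of the softclock() scan loop into a helper
  that runs the callback and returns the next callout to visit, and the shared
  unlink/freelist bookkeeping goes through callout_cc_del().  The following is a
  minimal userland sketch of that pattern only, not the kern_timeout.c code;
  every name in it other than the softclock_call_cc()/callout_cc_del() analogy
  it mimics is made up for illustration.

/*
 * Userland sketch of the pattern: a scan loop hands each expired entry
 * to a helper; the helper runs the callback, does the shared unlink/free
 * bookkeeping, and returns the next entry to visit.  All identifiers here
 * are illustrative, not the kern_timeout.c ones.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry	*next;
	void		(*func)(void *);
	void		*arg;
};

struct wheel {
	struct entry	*head;		/* expired entries for this tick */
	struct entry	*cursor;	/* analogue of cc->cc_next */
};

/* Analogue of callout_cc_del(): bookkeeping shared with the stop path. */
static void
entry_del(struct wheel *w, struct entry *e)
{

	if (w->cursor == e)
		w->cursor = e->next;
	free(e);
}

/* Analogue of softclock_call_cc(): run one entry, return the next one. */
static struct entry *
entry_call(struct wheel *w, struct entry *e)
{

	w->cursor = e->next;
	e->func(e->arg);		/* the callback may touch the wheel */
	entry_del(w, e);
	return (w->cursor);		/* re-read: the callback may move it */
}

static void
hello(void *arg)
{

	printf("callback ran: %s\n", (const char *)arg);
}

int
main(void)
{
	struct wheel w = { NULL, NULL };
	struct entry *c, *e;

	e = malloc(sizeof(*e));
	if (e == NULL)
		return (1);
	e->next = NULL;
	e->func = hello;
	e->arg = "expired entry";
	w.head = e;

	/* The scan loop itself now stays short. */
	for (c = w.head; c != NULL; )
		c = entry_call(&w, c);
	return (0);
}

  In the real code softclock_call_cc() additionally handles lock acquisition,
  cancellation, the waiter wakeup, and cross-CPU migration, which is why it is
  considerably longer than this sketch.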

Modified:
  stable/8/sys/kern/kern_timeout.c
Directory Properties:
  stable/8/sys/   (props changed)

Modified: stable/8/sys/kern/kern_timeout.c
==============================================================================
--- stable/8/sys/kern/kern_timeout.c	Thu May 10 11:06:19 2012	(r235224)
+++ stable/8/sys/kern/kern_timeout.c	Thu May 10 11:08:09 2012	(r235225)
@@ -405,6 +405,181 @@ callout_cc_add(struct callout *c, struct
 	    c, c_links.tqe);
 }
 
+static void
+callout_cc_del(struct callout *c, struct callout_cpu *cc)
+{
+
+	if (cc->cc_next == c)
+		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
+		c->c_func = NULL;
+		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
+	}
+}
+
+static struct callout *
+softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
+    int *lockcalls, int *gcalls)
+{
+	void (*c_func)(void *);
+	void *c_arg;
+	struct lock_class *class;
+	struct lock_object *c_lock;
+	int c_flags, sharedlock;
+#ifdef SMP
+	struct callout_cpu *new_cc;
+	void (*new_func)(void *);
+	void *new_arg;
+	int new_cpu, new_ticks;
+#endif
+#ifdef DIAGNOSTIC
+	struct bintime bt1, bt2;
+	struct timespec ts2;
+	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
+	static timeout_t *lastfunc;
+#endif
+
+	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
+	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
+	c_lock = c->c_lock;
+	c_func = c->c_func;
+	c_arg = c->c_arg;
+	c_flags = c->c_flags;
+	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
+		c->c_flags = CALLOUT_LOCAL_ALLOC;
+	else
+		c->c_flags &= ~CALLOUT_PENDING;
+	cc->cc_curr = c;
+	cc->cc_cancel = 0;
+	CC_UNLOCK(cc);
+	if (c_lock != NULL) {
+		class->lc_lock(c_lock, sharedlock);
+		/*
+		 * The callout may have been cancelled
+		 * while we switched locks.
+		 */
+		if (cc->cc_cancel) {
+			class->lc_unlock(c_lock);
+			goto skip;
+		}
+		/* The callout cannot be stopped now. */
+		cc->cc_cancel = 1;
+
+		if (c_lock == &Giant.lock_object) {
+			(*gcalls)++;
+			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
+			    c, c_func, c_arg);
+		} else {
+			(*lockcalls)++;
+			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
+			    c, c_func, c_arg);
+		}
+	} else {
+		(*mpcalls)++;
+		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
+		    c, c_func, c_arg);
+	}
+#ifdef DIAGNOSTIC
+	binuptime(&bt1);
+#endif
+	THREAD_NO_SLEEPING();
+	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
+	c_func(c_arg);
+	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
+	THREAD_SLEEPING_OK();
+#ifdef DIAGNOSTIC
+	binuptime(&bt2);
+	bintime_sub(&bt2, &bt1);
+	if (bt2.frac > maxdt) {
+		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
+			bintime2timespec(&bt2, &ts2);
+			printf(
+		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
+			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
+		}
+		maxdt = bt2.frac;
+		lastfunc = c_func;
+	}
+#endif
+	CTR1(KTR_CALLOUT, "callout %p finished", c);
+	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
+		class->lc_unlock(c_lock);
+skip:
+	CC_LOCK(cc);
+	/*
+	 * If the current callout is locally allocated (from
+	 * timeout(9)) then put it on the freelist.
+	 *
+	 * Note: we need to check the cached copy of c_flags because
+	 * if it was not local, then it's not safe to deref the
+	 * callout pointer.
+	 */
+	if (c_flags & CALLOUT_LOCAL_ALLOC) {
+		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
+		    ("corrupted callout"));
+		c->c_func = NULL;
+		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
+	}
+	cc->cc_curr = NULL;
+	if (cc->cc_waiting) {
+		/*
+		 * There is someone waiting for the
+		 * callout to complete.
+		 * If the callout was scheduled for
+		 * migration just cancel it.
+		 */
+		if (cc_cme_migrating(cc))
+			cc_cme_cleanup(cc);
+		cc->cc_waiting = 0;
+		CC_UNLOCK(cc);
+		wakeup(&cc->cc_waiting);
+		CC_LOCK(cc);
+	} else if (cc_cme_migrating(cc)) {
+#ifdef SMP
+		/*
+		 * If the callout was scheduled for
+		 * migration just perform it now.
+		 */
+		new_cpu = cc->cc_migration_cpu;
+		new_ticks = cc->cc_migration_ticks;
+		new_func = cc->cc_migration_func;
+		new_arg = cc->cc_migration_arg;
+		cc_cme_cleanup(cc);
+
+		/*
+		 * Handle deferred callout stops
+		 */
+		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
+			CTR3(KTR_CALLOUT,
+			     "deferred cancelled %p func %p arg %p",
+			     c, new_func, new_arg);
+			callout_cc_del(c, cc);
+			goto nextc;
+		}
+
+		c->c_flags &= ~CALLOUT_DFRMIGRATION;
+
+		/*
+		 * It should be asserted here that the
+		 * callout is not destroyed, but that
+		 * is not easy.
+		 */
+		new_cc = callout_cpu_switch(c, cc, new_cpu);
+		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
+		    new_cpu);
+		CC_UNLOCK(new_cc);
+		CC_LOCK(cc);
+#else
+		panic("migration should not happen");
+#endif
+	}
+#ifdef SMP
+nextc:
+#endif
+	return (cc->cc_next);
+}
+
 /*
  * The callout mechanism is based on the work of Adam M. Costello and 
  * George Varghese, published in a technical report entitled "Redesigning
@@ -433,12 +608,6 @@ softclock(void *arg)
 	int mpcalls;
 	int lockcalls;
 	int gcalls;
-#ifdef DIAGNOSTIC
-	struct bintime bt1, bt2;
-	struct timespec ts2;
-	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
-	static timeout_t *lastfunc;
-#endif
 
 #ifndef MAX_SOFTCLOCK_STEPS
 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
@@ -460,7 +629,7 @@ softclock(void *arg)
 		cc->cc_softticks++;
 		bucket = &cc->cc_callwheel[curticks & callwheelmask];
 		c = TAILQ_FIRST(bucket);
-		while (c) {
+		while (c != NULL) {
 			depth++;
 			if (c->c_time != curticks) {
 				c = TAILQ_NEXT(c, c_links.tqe);
@@ -475,189 +644,10 @@ softclock(void *arg)
 					steps = 0;
 				}
 			} else {
-				void (*c_func)(void *);
-				void *c_arg;
-				struct lock_class *class;
-				struct lock_object *c_lock;
-				int c_flags, sharedlock;
-
-				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 				TAILQ_REMOVE(bucket, c, c_links.tqe);
-				class = (c->c_lock != NULL) ?
-				    LOCK_CLASS(c->c_lock) : NULL;
-				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
-				    0 : 1;
-				c_lock = c->c_lock;
-				c_func = c->c_func;
-				c_arg = c->c_arg;
-				c_flags = c->c_flags;
-				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
-					c->c_flags = CALLOUT_LOCAL_ALLOC;
-				} else {
-					c->c_flags =
-					    (c->c_flags & ~CALLOUT_PENDING);
-				}
-				cc->cc_curr = c;
-				cc->cc_cancel = 0;
-				CC_UNLOCK(cc);
-				if (c_lock != NULL) {
-					class->lc_lock(c_lock, sharedlock);
-					/*
-					 * The callout may have been cancelled
-					 * while we switched locks.
-					 */
-					if (cc->cc_cancel) {
-						class->lc_unlock(c_lock);
-						goto skip;
-					}
-					/* The callout cannot be stopped now. */
-					cc->cc_cancel = 1;
-
-					if (c_lock == &Giant.lock_object) {
-						gcalls++;
-						CTR3(KTR_CALLOUT,
-						    "callout %p func %p arg %p",
-						    c, c_func, c_arg);
-					} else {
-						lockcalls++;
-						CTR3(KTR_CALLOUT, "callout lock"
-						    " %p func %p arg %p",
-						    c, c_func, c_arg);
-					}
-				} else {
-					mpcalls++;
-					CTR3(KTR_CALLOUT,
-					    "callout mpsafe %p func %p arg %p",
-					    c, c_func, c_arg);
-				}
-#ifdef DIAGNOSTIC
-				binuptime(&bt1);
-#endif
-				THREAD_NO_SLEEPING();
-				SDT_PROBE(callout_execute, kernel, ,
-				    callout_start, c, 0, 0, 0, 0);
-				c_func(c_arg);
-				SDT_PROBE(callout_execute, kernel, ,
-				    callout_end, c, 0, 0, 0, 0);
-				THREAD_SLEEPING_OK();
-#ifdef DIAGNOSTIC
-				binuptime(&bt2);
-				bintime_sub(&bt2, &bt1);
-				if (bt2.frac > maxdt) {
-					if (lastfunc != c_func ||
-					    bt2.frac > maxdt * 2) {
-						bintime2timespec(&bt2, &ts2);
-						printf(
-			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
-						    c_func, c_arg,
-						    (intmax_t)ts2.tv_sec,
-						    ts2.tv_nsec);
-					}
-					maxdt = bt2.frac;
-					lastfunc = c_func;
-				}
-#endif
-				CTR1(KTR_CALLOUT, "callout %p finished", c);
-				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
-					class->lc_unlock(c_lock);
-			skip:
-				CC_LOCK(cc);
-				/*
-				 * If the current callout is locally
-				 * allocated (from timeout(9))
-				 * then put it on the freelist.
-				 *
-				 * Note: we need to check the cached
-				 * copy of c_flags because if it was not
-				 * local, then it's not safe to deref the
-				 * callout pointer.
-				 */
-				if (c_flags & CALLOUT_LOCAL_ALLOC) {
-					KASSERT(c->c_flags ==
-					    CALLOUT_LOCAL_ALLOC,
-					    ("corrupted callout"));
-					c->c_func = NULL;
-					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
-					    c_links.sle);
-				}
-				cc->cc_curr = NULL;
-				if (cc->cc_waiting) {
-
-					/*
-					 * There is someone waiting for the
-					 * callout to complete.
-					 * If the callout was scheduled for
-					 * migration just cancel it.
-					 */
-					if (cc_cme_migrating(cc))
-						cc_cme_cleanup(cc);
-					cc->cc_waiting = 0;
-					CC_UNLOCK(cc);
-					wakeup(&cc->cc_waiting);
-					CC_LOCK(cc);
-				} else if (cc_cme_migrating(cc)) {
-#ifdef SMP
-					struct callout_cpu *new_cc;
-					void (*new_func)(void *);
-					void *new_arg;
-					int new_cpu, new_ticks;
-
-					/*
-					 * If the callout was scheduled for
-					 * migration just perform it now.
-					 */
-					new_cpu = cc->cc_migration_cpu;
-					new_ticks = cc->cc_migration_ticks;
-					new_func = cc->cc_migration_func;
-					new_arg = cc->cc_migration_arg;
-					cc_cme_cleanup(cc);
-
-					/*
-					 * Handle deferred callout stops
-					 */
-					if ((c->c_flags & CALLOUT_DFRMIGRATION)
-					    == 0) {
-						CTR3(KTR_CALLOUT,
-					"deferred cancelled %p func %p arg %p",
-						    c, new_func, new_arg);
-						if (cc->cc_next == c) {
-							cc->cc_next =
-							    TAILQ_NEXT(c,
-							    c_links.tqe);
-						}
-						if (c->c_flags &
-						    CALLOUT_LOCAL_ALLOC) {
-							c->c_func = NULL;
-							SLIST_INSERT_HEAD(
-							    &cc->cc_callfree, c,
-							    c_links.sle);
-						}
-						goto nextc;
-					} else {
-						c->c_flags &= ~
-						    CALLOUT_DFRMIGRATION;
-					}
-
-					/*
-					 * It should be assert here that the
-					 * callout is not destroyed but that
-					 * is not easy.
-					 */
-					new_cc = callout_cpu_switch(c, cc,
-					    new_cpu);
-					callout_cc_add(c, new_cc, new_ticks,
-					    new_func, new_arg, new_cpu);
-					CC_UNLOCK(new_cc);
-					CC_LOCK(cc);
-#else
-					panic("migration should not happen");
-#endif
-				}
-#ifdef SMP
-nextc:
-#endif
+				c = softclock_call_cc(c, cc, &mpcalls,
+				    &lockcalls, &gcalls);
 				steps = 0;
-				c = cc->cc_next;
 			}
 		}
 	}
@@ -1000,19 +990,12 @@ again:
 
 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
 
-	if (cc->cc_next == c) {
-		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
-	}
-	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
-	    c_links.tqe);
-
 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 	    c, c->c_func, c->c_arg);
+	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+	    c_links.tqe);
+	callout_cc_del(c, cc);
 
-	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
-		c->c_func = NULL;
-		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
-	}
 	CC_UNLOCK(cc);
 	return (1);
 }
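
One detail worth decoding in the DIAGNOSTIC block that moves into the new
helper: struct bintime keeps the sub-second part of a timestamp as a 64-bit
binary fraction, i.e. in units of 1/2^64 second, so the threshold
maxdt = 36893488147419102 works out to 36893488147419102 / 2^64 ~= 0.002 s,
which is what the "/* 2 msec */" comment asserts.  A tiny standalone check of
that arithmetic (ordinary C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* bintime fractions are in units of 2^-64 seconds. */
	uint64_t maxdt = 36893488147419102ULL;
	double seconds = (double)maxdt / 18446744073709551616.0;	/* 2^64 */

	printf("maxdt = %ju fraction units = %.9f seconds\n",
	    (uintmax_t)maxdt, seconds);
	return (0);
}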


