Date:      Thu, 3 Dec 2015 19:35:10 +0000 (UTC)
From:      Hans Petter Selasky <hselasky@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r291714 - in projects/hps_head: share/man/man9 sys/kern sys/netinet sys/sys
Message-ID:  <201512031935.tB3JZAxU013341@repo.freebsd.org>

Author: hselasky
Date: Thu Dec  3 19:35:10 2015
New Revision: 291714
URL: https://svnweb.freebsd.org/changeset/base/291714

Log:
  Integrate recent callout API changes and define the callout return
  values as suggested by Gleb Smirnoff. Update the timeout(9) manual
  page.

Modified:
  projects/hps_head/share/man/man9/timeout.9
  projects/hps_head/sys/kern/kern_timeout.c
  projects/hps_head/sys/netinet/tcp_timer.c
  projects/hps_head/sys/sys/callout.h

Modified: projects/hps_head/share/man/man9/timeout.9
==============================================================================
--- projects/hps_head/share/man/man9/timeout.9	Thu Dec  3 18:04:43 2015	(r291713)
+++ projects/hps_head/share/man/man9/timeout.9	Thu Dec  3 19:35:10 2015	(r291714)
@@ -29,14 +29,14 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd January 24, 2015
+.Dd December 3, 2015
 .Dt TIMEOUT 9
 .Os
 .Sh NAME
 .Nm callout_active ,
 .Nm callout_deactivate ,
+.Nm callout_async_drain ,
 .Nm callout_drain ,
-.Nm callout_drain_async ,
 .Nm callout_handle_init ,
 .Nm callout_init ,
 .Nm callout_init_mtx ,
@@ -66,6 +66,67 @@
 typedef void timeout_t (void *);
 typedef void callout_func_t (void *);
 .Ed
+.Ft int
+.Fn callout_active "struct callout *c"
+.Ft void
+.Fn callout_deactivate "struct callout *c"
+.Ft int
+.Fn callout_async_drain "struct callout *c" "callout_func_t *drain"
+.Ft int
+.Fn callout_drain "struct callout *c"
+.Ft void
+.Fn callout_handle_init "struct callout_handle *handle"
+.Bd -literal
+struct callout_handle handle = CALLOUT_HANDLE_INITIALIZER(&handle);
+.Ed
+.Ft void
+.Fn callout_init "struct callout *c" "int mpsafe"
+.Ft void
+.Fn callout_init_mtx "struct callout *c" "struct mtx *mtx" "int flags"
+.Ft void
+.Fn callout_init_rm "struct callout *c" "struct rmlock *rm" "int flags"
+.Ft void
+.Fn callout_init_rw "struct callout *c" "struct rwlock *rw" "int flags"
+.Ft int
+.Fn callout_pending "struct callout *c"
+.Ft int
+.Fn callout_reset "struct callout *c" "int ticks" "timeout_t *func" "void *arg"
+.Ft int
+.Fn callout_reset_curcpu "struct callout *c" "int ticks" "timeout_t *func" \
+"void *arg"
+.Ft int
+.Fn callout_reset_on "struct callout *c" "int ticks" "timeout_t *func" \
+"void *arg" "int cpu"
+.Ft int
+.Fn callout_reset_sbt "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "timeout_t *func" "void *arg" "int flags"
+.Ft int
+.Fn callout_reset_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "timeout_t *func" "void *arg" "int flags"
+.Ft int
+.Fn callout_reset_sbt_on "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "timeout_t *func" "void *arg" "int cpu" "int flags"
+.Ft int
+.Fn callout_schedule "struct callout *c" "int ticks"
+.Ft int
+.Fn callout_schedule_curcpu "struct callout *c" "int ticks"
+.Ft int
+.Fn callout_schedule_on "struct callout *c" "int ticks" "int cpu"
+.Ft int
+.Fn callout_schedule_sbt "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "int flags"
+.Ft int
+.Fn callout_schedule_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "int flags"
+.Ft int
+.Fn callout_schedule_sbt_on "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "int cpu" "int flags"
+.Ft int
+.Fn callout_stop "struct callout *c"
+.Ft struct callout_handle
+.Fn timeout "timeout_t *func" "void *arg" "int ticks"
+.Ft void
+.Fn untimeout "timeout_t *func" "void *arg" "struct callout_handle handle"
 .Sh DESCRIPTION
 The
 .Nm callout
@@ -80,7 +141,7 @@ stores the full state about any pending 
 must be drained by a call to
 .Fn callout_drain
 or
-.Fn callout_drain_async
+.Fn callout_async_drain
 before freeing.
 .Sh INITIALIZATION
 .Ft void
@@ -246,9 +307,11 @@ argument.
 The number of ticks in a second is defined by
 .Dv hz
 and can vary from system to system.
-This function returns a non-zero value if the given callout was pending and
-the callback function was prevented from being called.
-Otherwise, a value of zero is returned.
+This function returns either
+.Dv CALLOUT_RET_STOPPED ,
+.Dv CALLOUT_RET_CANCELLED
+or
+.Dv CALLOUT_RET_DRAINING .
 If a lock is associated with the callout given by the
 .Fa c
 argument and it is exclusively locked when this function is called, this
@@ -439,7 +502,7 @@ or the
 or the
 .Fn callout_drain
 or the
-.Fn callout_drain_async
+.Fn callout_async_drain
 function is called on the same callout as given by the
 .Fa c
 argument.
@@ -484,9 +547,11 @@ This function is used to stop a timeout 
 .Fa c
 argument, in a non-blocking fashion.
 This function can be called multiple times in a row with no side effects, even if the callout is already stopped. However, this function should not be called before the callout has been initialized.
-This function returns a non-zero value if the given callout was pending and
-the callback function was prevented from being called.
-Else a value of zero is returned.
+This function returns either
+.Dv CALLOUT_RET_STOPPED ,
+.Dv CALLOUT_RET_CANCELLED
+or
+.Dv CALLOUT_RET_DRAINING .
 If a lock is associated with the callout given by the
 .Fa c
 argument and it is exclusively locked when this function is called, the
@@ -509,26 +574,50 @@ When this function returns, it is safe t
 argument.
 .Pp
 .Ft int
-.Fn callout_drain_async "struct callout *c" "callout_func_t *fn" "void *arg"
+.Fn callout_async_drain "struct callout *c" "callout_func_t *drain"
 This function is non-blocking and works like the
 .Fn callout_stop
-function except if it returns non-zero it means the callback function pointed to by the
-.Fa fn
+function, except that if it returns
+.Dv CALLOUT_RET_DRAINING ,
+it means the callback function pointed to by the
+.Fa drain
 argument will be called back with the callout's
 .Fa arg
 argument when all references to the callout pointed to by the
 .Fa c
 argument are gone.
-If this function returns non-zero it should not be called again until the callback function has been called.
+If this function returns
+.Dv CALLOUT_RET_DRAINING
+it should not be called again until the callback function has been called.
+Note that when stopping multiple callouts that use the same lock, it is possible
+to get multiple return values of
+.Dv CALLOUT_RET_DRAINING
+and multiple calls to the
+.Fa drain
+function, depending upon which CPUs the callouts are running on. The
+.Fa drain
+function itself is called unlocked from the context of the completing
+callout, either softclock or hardclock, just like a callout itself.
 If the
 .Fn callout_drain
 or
-.Fn callout_drain_async
+.Fn callout_async_drain
 functions are called while an asynchronous drain is pending,
 previously pending asynchronous drains might get cancelled.
-If this function returns zero, it is safe to free the callout structure pointed to by the
+If this function returns a value different from
+.Dv CALLOUT_RET_DRAINING
+it is safe to free the callout structure pointed to by the
 .Fa c
 argument right away.
+.Sh CALLOUT FUNCTION RETURN VALUES
+.Bl -tag -width ".Dv CALLOUT_RET_CANCELLED"
+.It Dv CALLOUT_RET_DRAINING
+The callout cannot be stopped and needs to be drained.
+.It Dv CALLOUT_RET_CANCELLED
+The callout was successfully stopped.
+.It Dv CALLOUT_RET_STOPPED
+The callout was already stopped.
+.El
 .Sh CALLOUT FUNCTION RESTRICTIONS
 Callout functions must not sleep.
 They may not acquire sleepable locks, wait on condition variables,
@@ -549,7 +638,7 @@ Softclock threads for CPUs other than ze
 respective CPUs by setting the
 .Va kern.pin_pcpu_swi
 loader tunable to a non-zero value.
-.Sh "AVOIDING RACE CONDITIONS"
+.Sh AVOIDING RACE CONDITIONS
 The callout subsystem invokes callout functions from its own thread
 context.
 Without some kind of synchronization,
@@ -615,7 +704,8 @@ callout function is about to be called.
 For example:
 .Bd -literal -offset indent
 if (sc->sc_flags & SCFLG_CALLOUT_RUNNING) {
-	if (callout_stop(&sc->sc_callout)) {
+	if (callout_stop(&sc->sc_callout) ==
+	    CALLOUT_RET_CANCELLED) {
 		sc->sc_flags &= ~SCFLG_CALLOUT_RUNNING;
 		/* successfully stopped */
 	} else {

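As a usage illustration of the return values documented above, here is a minimal sketch under stated assumptions: struct foo_softc and the foo_* functions are invented names, while the callout calls follow the prototypes in the updated timeout.9 with this revision applied.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct foo_softc {
	struct mtx	sc_mtx;
	struct callout	sc_callout;
	int		sc_running;
};

static void
foo_callback(void *arg)
{
	struct foo_softc *sc = arg;

	/* callout_init_mtx() below makes the callback run with sc_mtx held */
	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (sc->sc_running)
		callout_reset(&sc->sc_callout, hz, foo_callback, sc);
}

static void
foo_start(struct foo_softc *sc)
{
	mtx_init(&sc->sc_mtx, "foo timer", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	sc->sc_running = 1;
	callout_reset(&sc->sc_callout, hz, foo_callback, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_stop(struct foo_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	sc->sc_running = 0;
	if (callout_stop(&sc->sc_callout) == CALLOUT_RET_CANCELLED) {
		/* a pending call was removed before it could run */
	} else {
		/*
		 * CALLOUT_RET_STOPPED: nothing was pending.
		 * CALLOUT_RET_DRAINING: the callback is running or could
		 * not be cancelled; drain before freeing the structure.
		 */
	}
	mtx_unlock(&sc->sc_mtx);
}

Because the callout was initialized with callout_init_mtx(), holding sc_mtx across the callout_stop() call is what closes the race described in the AVOIDING RACE CONDITIONS section; the explicit comparison against CALLOUT_RET_CANCELLED matches the updated example there.
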
Modified: projects/hps_head/sys/kern/kern_timeout.c
==============================================================================
--- projects/hps_head/sys/kern/kern_timeout.c	Thu Dec  3 18:04:43 2015	(r291713)
+++ projects/hps_head/sys/kern/kern_timeout.c	Thu Dec  3 19:35:10 2015	(r291714)
@@ -123,10 +123,6 @@ SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi
  */
 u_int callwheelsize, callwheelmask;
 
-#define	CALLOUT_RET_NORMAL	0
-#define	CALLOUT_RET_CANCELLED	1
-#define	CALLOUT_RET_DRAINING	2
-
 struct callout_args {
 	sbintime_t time;		/* absolute time for the event */
 	sbintime_t precision;		/* delta allowed wrt opt */
@@ -298,12 +294,10 @@ struct cc_exec {
 	bool cc_cancel;
 	/*
 	 * The "cc_drain_fn" points to a function which shall be
-	 * called with the argument stored in "cc_drain_arg" when an
-	 * asynchronous drain is performed. This field is write
-	 * protected by the "cc_lock" spinlock.
+	 * called when an asynchronous drain is performed. This field
+	 * is write protected by the "cc_lock" spinlock.
 	 */
 	callout_func_t *cc_drain_fn;
-	void *cc_drain_arg;
 	/*
 	 * The following fields are used for callout profiling only:
 	 */
@@ -338,7 +332,6 @@ struct callout_cpu {
 #define	cc_exec_restart(cc, dir)	(cc)->cc_exec_entity[(dir)].cc_restart
 #define	cc_exec_cancel(cc, dir)		(cc)->cc_exec_entity[(dir)].cc_cancel
 #define	cc_exec_drain_fn(cc, dir)	(cc)->cc_exec_entity[(dir)].cc_drain_fn
-#define	cc_exec_drain_arg(cc, dir)	(cc)->cc_exec_entity[(dir)].cc_drain_arg
 #define	cc_exec_depth(cc, dir)		(cc)->cc_exec_entity[(dir)].cc_depth
 #define	cc_exec_mpcalls(cc, dir)	(cc)->cc_exec_entity[(dir)].cc_mpcalls
 #define	cc_exec_lockcalls(cc, dir)	(cc)->cc_exec_entity[(dir)].cc_lockcalls
@@ -691,7 +684,7 @@ callout_cc_add_locked(struct callout *c,
 	CC_LOCK_ASSERT(cc);
 
 	/* update flags before swapping locks, if any */
-	c->c_flags &= ~(CALLOUT_PROCESSED | CALLOUT_DIRECT | CALLOUT_DEFRESTART);
+	c->c_flags &= ~(CALLOUT_PROCESSED | CALLOUT_DIRECT);
 	if (coa->flags & C_DIRECT_EXEC)
 		c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING | CALLOUT_DIRECT);
 	else
@@ -784,7 +777,6 @@ softclock_call_cc(struct callout *c, str
 	cc_exec_curr(cc, direct) = c;
 	cc_exec_restart(cc, direct) = false;
 	cc_exec_drain_fn(cc, direct) = NULL;
-	cc_exec_drain_arg(cc, direct) = NULL;
 
 	if (c_lock != NULL) {
 		cc_exec_cancel(cc, direct) = false;
@@ -836,9 +828,9 @@ softclock_call_cc(struct callout *c, str
 	sbt1 = sbinuptime();
 #endif
 	THREAD_NO_SLEEPING();
-	SDT_PROBE(callout_execute, kernel, , callout__start, c, 0, 0, 0, 0);
+	SDT_PROBE1(callout_execute, kernel, , callout__start, c);
 	c_func(c_arg);
-	SDT_PROBE(callout_execute, kernel, , callout__end, c, 0, 0, 0, 0);
+	SDT_PROBE1(callout_execute, kernel, , callout__end, c);
 	THREAD_SLEEPING_OK();
 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
 	sbt2 = sbinuptime();
@@ -879,8 +871,7 @@ skip_cc_locked:
 		 */
 		CC_UNLOCK(cc);
 		/* call drain function unlocked */
-		cc_exec_drain_fn(cc, direct)(
-		    cc_exec_drain_arg(cc, direct));
+		cc_exec_drain_fn(cc, direct)(c_arg);
 		CC_LOCK(cc);
 	} else if (c_flags & CALLOUT_LOCAL_ALLOC) {
 		/* return callout back to freelist */
@@ -1002,12 +993,27 @@ callout_handle_init(struct callout_handl
 	handle->callout = NULL;
 }
 
+#ifdef KTR
+static const char *
+callout_retvalstring(int retval)
+{
+	switch (retval) {
+	case CALLOUT_RET_DRAINING:
+		return ("callout cannot be stopped and needs drain");
+	case CALLOUT_RET_CANCELLED:
+		return ("callout was successfully stopped");
+	default:
+		return ("callout was already stopped");
+	}
+}
+#endif
+
 static int
 callout_restart_async(struct callout *c, struct callout_args *coa,
-    callout_func_t *drain_fn, void *drain_arg)
+    callout_func_t *drain_fn)
 {
 	struct callout_cpu *cc;
-	int cancelled;
+	int retval;
 	int direct;
 
 	cc = callout_lock(c);
@@ -1022,16 +1028,19 @@ callout_restart_async(struct callout *c,
 	if (cc_exec_curr(cc, direct) == c) {
 		/*
 		 * Try to prevent the callback from running by setting
-		 * the "cc_cancel" variable to "true". Also check if
-		 * the callout was previously subject to a deferred
-		 * callout restart:
+		 * the "cc_cancel" variable to "true".
 		 */
-		if (cc_exec_cancel(cc, direct) == false ||
-		    (c->c_flags & CALLOUT_DEFRESTART) != 0) {
+		if (drain_fn != NULL) {
+			/* set drain function, if any */
+			cc_exec_drain_fn(cc, direct) = drain_fn;
 			cc_exec_cancel(cc, direct) = true;
-			cancelled = CALLOUT_RET_CANCELLED;
+			retval = CALLOUT_RET_DRAINING;
+		} else if (cc_exec_cancel(cc, direct) == false ||
+		    cc_exec_restart(cc, direct) == true) {
+			cc_exec_cancel(cc, direct) = true;
+			retval = CALLOUT_RET_CANCELLED;
 		} else {
-			cancelled = CALLOUT_RET_NORMAL;
+			retval = CALLOUT_RET_DRAINING;
 		}
 
 		/*
@@ -1041,31 +1050,23 @@ callout_restart_async(struct callout *c,
 		 */
 		if (cc_exec_drain_fn(cc, direct) != NULL ||
 		    coa == NULL || (c->c_flags & CALLOUT_LOCAL_ALLOC) != 0) {
-			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
-			    cancelled ? "cancelled and draining" : "draining",
+			CTR4(KTR_CALLOUT, "%s: %p func %p arg %p",
+			    callout_retvalstring(retval),
 			    c, c->c_func, c->c_arg);
 
 			/* clear old flags, if any */
 			c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING |
-			    CALLOUT_DEFRESTART | CALLOUT_PROCESSED);
+			    CALLOUT_PROCESSED);
 
 			/* clear restart flag, if any */
 			cc_exec_restart(cc, direct) = false;
-
-			/* set drain function, if any */
-			if (drain_fn != NULL) {
-				cc_exec_drain_fn(cc, direct) = drain_fn;
-				cc_exec_drain_arg(cc, direct) = drain_arg;
-				cancelled |= CALLOUT_RET_DRAINING;
-			}
 		} else {
-			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
-			    cancelled ? "cancelled and restarting" : "restarting",
+			CTR4(KTR_CALLOUT, "%s: %p func %p arg %p",
+			    callout_retvalstring(retval),
 			    c, c->c_func, c->c_arg);
 
 			/* get us back into the game */
-			c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING |
-			    CALLOUT_DEFRESTART);
+			c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 			c->c_flags &= ~CALLOUT_PROCESSED;
 
 			/* enable deferred restart */
@@ -1086,13 +1087,13 @@ callout_restart_async(struct callout *c,
 			} else {
 				TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 			}
-			cancelled = CALLOUT_RET_CANCELLED;
+			retval = CALLOUT_RET_CANCELLED;
 		} else {
-			cancelled = CALLOUT_RET_NORMAL;
+			retval = CALLOUT_RET_STOPPED;
 		}
 
-		CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
-		    cancelled ? "rescheduled" : "scheduled",
+		CTR4(KTR_CALLOUT, "%s: %p func %p arg %p",
+		    callout_retvalstring(retval),
 		    c, c->c_func, c->c_arg);
 
 		/* [re-]schedule callout, if any */
@@ -1101,17 +1102,17 @@ callout_restart_async(struct callout *c,
 		} else {
 			/* clear old flags, if any */
 			c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING |
-			    CALLOUT_DEFRESTART | CALLOUT_PROCESSED);
+			    CALLOUT_PROCESSED);
 
 			/* return callback to pre-allocated list, if any */
 			if ((c->c_flags & CALLOUT_LOCAL_ALLOC) &&
-			    cancelled != CALLOUT_RET_NORMAL) {
+			    retval != CALLOUT_RET_STOPPED) {
 				callout_cc_del(c, cc);
 			}
 		}
 	}
 	CC_UNLOCK(cc);
-	return (cancelled);
+	return (retval);
 }
 
 /*
@@ -1189,7 +1190,7 @@ callout_reset_sbt_on(struct callout *c, 
 	}
 
 	/* get callback started, if any */
-	return (callout_restart_async(c, &coa, NULL, NULL));
+	return (callout_restart_async(c, &coa, NULL));
 }
 
 /*
@@ -1211,27 +1212,26 @@ int
 callout_stop(struct callout *c)
 {
 	/* get callback stopped, if any */
-	return (callout_restart_async(c, NULL, NULL, NULL));
+	return (callout_restart_async(c, NULL, NULL));
 }
 
 static void
 callout_drain_function(void *arg)
 {
-	wakeup(arg);
+	wakeup(&callout_drain_function);
 }
 
 int
-callout_drain_async(struct callout *c, callout_func_t *fn, void *arg)
+callout_async_drain(struct callout *c, callout_func_t *fn)
 {
 	/* get callback stopped, if any */
-	return (callout_restart_async(
-	    c, NULL, fn, arg) & CALLOUT_RET_DRAINING);
+	return (callout_restart_async(c, NULL, fn));
 }
 
 int
 callout_drain(struct callout *c)
 {
-	int cancelled;
+	int retval;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "Draining callout");
@@ -1240,9 +1240,9 @@ callout_drain(struct callout *c)
 
 	/* at this point the "c->c_cpu" field is not changing */
 
-	cancelled = callout_drain_async(c, &callout_drain_function, c);
+	retval = callout_async_drain(c, &callout_drain_function);
 
-	if (cancelled != CALLOUT_RET_NORMAL) {
+	if (retval == CALLOUT_RET_DRAINING) {
 		struct callout_cpu *cc;
 		int direct;
 
@@ -1259,19 +1259,21 @@ callout_drain(struct callout *c)
 		callout_unlock_client(c->c_flags, c->c_lock);
 
 		/* Wait for drain to complete */
-
-		while (cc_exec_curr(cc, direct) == c)
-			msleep_spin(c, (struct mtx *)&cc->cc_lock, "codrain", 0);
+		while (cc_exec_curr(cc, direct) == c) {
+			msleep_spin(&callout_drain_function,
+			    (struct mtx *)&cc->cc_lock, "codrain", 0);
+		}
 
 		CC_UNLOCK(cc);
 	} else {
 		callout_unlock_client(c->c_flags, c->c_lock);
 	}
 
-	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
+	CTR4(KTR_CALLOUT, "%s: %p func %p arg %p",
+	    callout_retvalstring(retval),
 	    c, c->c_func, c->c_arg);
 
-	return (cancelled & CALLOUT_RET_CANCELLED);
+	return (retval);
 }
 
 void

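To show how the asynchronous drain path changed in kern_timeout.c above can be used, here is a hedged sketch; bar_softc and the bar_* names are invented, and it assumes the callout was armed with the softc pointer as its callout argument. Per this revision the drain callback receives the callout's own argument (c_arg) and is invoked unlocked from the completing softclock or hardclock context.

#include <sys/param.h>
#include <sys/callout.h>

struct bar_softc {
	struct callout	sc_callout;
	/* ... other driver state ... */
};

static void
bar_release(struct bar_softc *sc)
{
	/* hypothetical: drop a reference, free the softc on last release */
	(void)sc;
}

static void
bar_drained(void *arg)
{
	struct bar_softc *sc = arg;

	/* called unlocked once all references to the callout are gone */
	bar_release(sc);
}

static void
bar_detach(struct bar_softc *sc)
{

	if (callout_async_drain(&sc->sc_callout, bar_drained) ==
	    CALLOUT_RET_DRAINING) {
		/* bar_drained() will run later and drop the last reference */
		return;
	}
	/*
	 * CALLOUT_RET_STOPPED or CALLOUT_RET_CANCELLED: no drain callback
	 * will follow and the callout structure may be freed right away.
	 */
	bar_release(sc);
}

This mirrors what callout_drain() itself now does internally: it passes callout_drain_function() as the drain callback and sleeps on its address until cc_exec_curr() no longer points at the callout.
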
Modified: projects/hps_head/sys/netinet/tcp_timer.c
==============================================================================
--- projects/hps_head/sys/netinet/tcp_timer.c	Thu Dec  3 18:04:43 2015	(r291713)
+++ projects/hps_head/sys/netinet/tcp_timer.c	Thu Dec  3 19:35:10 2015	(r291714)
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
 
 #include <netinet/cc.h>
 #include <netinet/in.h>
+#include <netinet/in_kdtrace.h>
 #include <netinet/in_pcb.h>
 #include <netinet/in_rss.h>
 #include <netinet/in_systm.h>
@@ -369,6 +370,8 @@ tcp_timer_2msl(void *xtp)
 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
 			  PRU_SLOWTIMO);
 #endif
+	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
+
 	if (tp != NULL)
 		INP_WUNLOCK(inp);
 	INP_INFO_RUNLOCK(&V_tcbinfo);
@@ -454,6 +457,7 @@ tcp_timer_keep(void *xtp)
 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
 			  PRU_SLOWTIMO);
 #endif
+	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
 	INP_WUNLOCK(inp);
 	INP_INFO_RUNLOCK(&V_tcbinfo);
 	CURVNET_RESTORE();
@@ -468,6 +472,7 @@ dropit:
 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
 			  PRU_SLOWTIMO);
 #endif
+	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
 	if (tp != NULL)
 		INP_WUNLOCK(tp->t_inpcb);
 	INP_INFO_RUNLOCK(&V_tcbinfo);
@@ -546,6 +551,7 @@ out:
 	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
 		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
 #endif
+	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
 	if (tp != NULL)
 		INP_WUNLOCK(inp);
 	INP_INFO_RUNLOCK(&V_tcbinfo);
@@ -658,9 +664,15 @@ tcp_timer_rexmt(void * xtp)
 		int isipv6;
 #endif
 
+		/*
+		 * The idea here is that each stage of the MTU probe (usually
+		 * 1448 -> 1188 -> 524) should be given 2 chances to recover
+		 * before clamping down further.  'tp->t_rxtshift % 2 == 0'
+		 * takes care of that.
+		 */
 		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
 		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
-		    (tp->t_rxtshift <= 2)) {
+		    (tp->t_rxtshift >= 2 && tp->t_rxtshift % 2 == 0)) {
 			/*
 			 * Enter Path MTU Black-hole Detection mechanism:
 			 * - Disable Path MTU Discovery (IP "DF" bit).
@@ -728,9 +740,11 @@ tcp_timer_rexmt(void * xtp)
 			 * with a lowered MTU, maybe this isn't a blackhole and
 			 * we restore the previous MSS and blackhole detection
 			 * flags.
+			 * The limit '6' is determined by giving each probe
+			 * stage (1448, 1188, 524) 2 chances to recover.
 			 */
 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
-			    (tp->t_rxtshift > 4)) {
+			    (tp->t_rxtshift > 6)) {
 				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
 				optlen = tp->t_maxopd - tp->t_maxseg;
@@ -792,6 +806,7 @@ out:
 		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
 			  PRU_SLOWTIMO);
 #endif
+	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
 	if (tp != NULL)
 		INP_WUNLOCK(inp);
 	if (headlocked)
@@ -847,7 +862,7 @@ tcp_timer_activate(struct tcpcb *tp, uin
 		}
 	if (delta == 0) {
 		if ((tp->t_timers->tt_flags & timer_type) &&
-		    callout_stop(t_callout) &&
+		    (callout_stop(t_callout) == CALLOUT_RET_CANCELLED) &&
 		    (tp->t_timers->tt_flags & f_reset)) {
 			tp->t_timers->tt_flags &= ~(timer_type | f_reset);
 		}
@@ -934,7 +949,7 @@ tcp_timer_stop(struct tcpcb *tp, uint32_
 		}
 
 	if (tp->t_timers->tt_flags & timer_type) {
-		if (callout_drain_async(t_callout, f_callout, tp) == 0 &&
+		if (callout_async_drain(t_callout, f_callout) != CALLOUT_RET_DRAINING &&
 		    (tp->t_timers->tt_flags & f_reset)) {
 			tp->t_timers->tt_flags &= ~(timer_type | f_reset);
 		} else {

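To make the new retransmit-count tests concrete, here is a small standalone userland sketch that evaluates only the two predicates changed in tcp_timer_rexmt() above; the TF2_PLPMTU_* flag handling that gates them in the kernel is deliberately not modeled.

#include <stdio.h>

int
main(void)
{
	int t_rxtshift;

	for (t_rxtshift = 1; t_rxtshift <= 7; t_rxtshift++) {
		printf("t_rxtshift %d: advance-probe test %s, give-up test %s\n",
		    t_rxtshift,
		    (t_rxtshift >= 2 && t_rxtshift % 2 == 0) ? "true" : "false",
		    (t_rxtshift > 6) ? "true" : "false");
	}
	return (0);
}

Within the range shown, the advance-probe test is true at counts 2, 4 and 6, giving each MSS stage (1448, 1188, 524) two retransmit attempts, while the give-up test only becomes true from count 7 onward, matching the '6' limit explained in the new comments.
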
Modified: projects/hps_head/sys/sys/callout.h
==============================================================================
--- projects/hps_head/sys/sys/callout.h	Thu Dec  3 18:04:43 2015	(r291713)
+++ projects/hps_head/sys/sys/callout.h	Thu Dec  3 19:35:10 2015	(r291714)
@@ -46,12 +46,17 @@
 #define	CALLOUT_MPSAFE		0x0008 /* deprecated */
 #define	CALLOUT_RETURNUNLOCKED	0x0010 /* handler returns with mtx unlocked */
 #define	CALLOUT_UNUSED_5	0x0020 /* --available-- */
-#define	CALLOUT_DEFRESTART	0x0040 /* callout restart is deferred */
+#define	CALLOUT_UNUSED_6	0x0040 /* --available-- */
 #define	CALLOUT_PROCESSED	0x0080 /* callout in wheel or processing list? */
 #define	CALLOUT_DIRECT 		0x0100 /* allow exec from hw int context */
 #define	CALLOUT_SET_LC(x)	(((x) & 7) << 16) /* set lock class */
 #define	CALLOUT_GET_LC(x)	(((x) >> 16) & 7) /* get lock class */
 
+/* return values for all callout_xxx() functions */
+#define	CALLOUT_RET_DRAINING	0 /* callout cannot be stopped, need drain */
+#define	CALLOUT_RET_CANCELLED	1 /* callout was successfully stopped */
+#define	CALLOUT_RET_STOPPED	-1 /* callout was already stopped */
+
 #define	C_DIRECT_EXEC		0x0001 /* direct execution of callout */
 #define	C_PRELBITS		7
 #define	C_PRELRANGE		((1 << C_PRELBITS) - 1)
@@ -68,7 +73,7 @@ struct callout_handle {
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
 int	callout_drain(struct callout *);
-int	callout_drain_async(struct callout *, callout_func_t *, void *);
+int	callout_async_drain(struct callout *, callout_func_t *);
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\

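One point worth noting about the definitions above: CALLOUT_RET_STOPPED is -1, which is non-zero, so a plain truth test of a stop result no longer means that a pending call was cancelled. The explicit comparisons used in the tcp_timer.c and timeout.9 changes are the intended idiom; a short hedged fragment follows (baz_stop and the callout pointer c are placeholders):

#include <sys/param.h>
#include <sys/callout.h>

static void
baz_stop(struct callout *c)
{
	int ret;

	ret = callout_stop(c);
	if (ret == CALLOUT_RET_CANCELLED) {
		/* a pending call was removed before it could run */
	} else if (ret == CALLOUT_RET_DRAINING) {
		/* the callback is running; drain before freeing the callout */
	} else {
		/* CALLOUT_RET_STOPPED: nothing was pending */
	}
}
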

