Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 13 Jul 2021 20:38:59 GMT
From:      Michael Tuexen <tuexen@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject:   git: deb3c279d156 - stable/13 - tcp: HPTS performance enhancements
Message-ID:  <202107132038.16DKcxBB026393@gitrepo.freebsd.org>

next in thread | raw e-mail | index | archive | help
The branch stable/13 has been updated by tuexen:

URL: https://cgit.FreeBSD.org/src/commit/?id=deb3c279d1560f71c1777f52bf3e73eebe3d986a

commit deb3c279d1560f71c1777f52bf3e73eebe3d986a
Author:     Randall Stewart <rrs@FreeBSD.org>
AuthorDate: 2021-07-06 19:23:22 +0000
Commit:     Michael Tuexen <tuexen@FreeBSD.org>
CommitDate: 2021-07-13 19:58:30 +0000

    tcp: HPTS performance enhancements
    
    HPTS drives both rack and bbr, and yet there have been many complaints
    about performance. This bit of work restructures hpts to help reduce CPU
    overhead. It does this by no longer relying on the timer/callout to
    drive it; instead it uses user return from a system call as well as lro
    flushes to drive hpts. The timer becomes a backstop that dynamically
    adjusts based on how "late" we are.
    
    Reviewed by:            tuexen, glebius
    Sponsored by:           Netflix Inc.
    Differential Revision:  https://reviews.freebsd.org/D31083
    
    (cherry picked from commit d7955cc0ffdf9fb58013245a6f181c757574ea0a)
---
 sys/kern/subr_trap.c          |   10 +
 sys/netinet/in_pcb.h          |    4 +-
 sys/netinet/tcp_hpts.c        | 1309 +++++++++++++++++++++++++----------------
 sys/netinet/tcp_hpts.h        |   99 +++-
 sys/netinet/tcp_lro.c         |   28 +-
 sys/netinet/tcp_lro.h         |   10 +-
 sys/netinet/tcp_stacks/bbr.c  |    6 +-
 sys/netinet/tcp_stacks/rack.c |   22 +-
 8 files changed, 931 insertions(+), 557 deletions(-)

diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 8981091b50ed..d0f616d037c5 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -140,6 +140,16 @@ userret(struct thread *td, struct trapframe *frame)
 #ifdef HWPMC_HOOKS
 	if (PMC_THREAD_HAS_SAMPLES(td))
 		PMC_CALL_HOOK(td, PMC_FN_THR_USERRET, NULL);
+#endif
+#ifdef TCPHPTS
+	/*
+	 * @gallatin is adament that this needs to go here, I
+	 * am not so sure. Running hpts is a lot like
+	 * a lro_flush() that happens while a user process
+	 * is running. But he may know best so I will go
+	 * with his view of accounting. :-)
+	 */
+	tcp_run_hpts();
 #endif
 	/*
 	 * Let the scheduler adjust our priority etc.
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index 9604a837cfb4..1849ad34fe6c 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -258,6 +258,7 @@ struct inpcb {
 	volatile uint32_t inp_in_input; /* on input hpts (lock b) */
 #endif
 	volatile uint16_t  inp_hpts_cpu; /* Lock (i) */
+	volatile uint16_t  inp_irq_cpu;	/* Set by LRO in behalf of or the driver */
 	u_int	inp_refcount;		/* (i) refcount */
 	int	inp_flags;		/* (i) generic IP/datagram flags */
 	int	inp_flags2;		/* (i) generic IP/datagram flags #2*/
@@ -266,7 +267,8 @@ struct inpcb {
 			 inp_input_cpu_set : 1,	/* on input hpts (i) */
 			 inp_hpts_calls :1,	/* (i) from output hpts */
 			 inp_input_calls :1,	/* (i) from input hpts */
-			 inp_spare_bits2 : 4;
+			 inp_irq_cpu_set :1,	/* (i) from LRO/Driver */
+			 inp_spare_bits2 : 3;
 	uint8_t inp_numa_domain;	/* numa domain */
 	void	*inp_ppcb;		/* (i) pointer to per-protocol pcb */
 	struct	socket *inp_socket;	/* (i) back pointer to socket */
diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
index fd8b66b9ccdb..9a390e3a85c8 100644
--- a/sys/netinet/tcp_hpts.c
+++ b/sys/netinet/tcp_hpts.c
@@ -193,23 +193,29 @@ static int tcp_bind_threads = 1;
 #else
 static int tcp_bind_threads = 2;
 #endif
-TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
-
+static int tcp_use_irq_cpu = 0;
 static struct tcp_hptsi tcp_pace;
+static uint32_t *cts_last_ran;
 static int hpts_does_tp_logging = 0;
+static int hpts_use_assigned_cpu = 1;
+static int32_t hpts_uses_oldest = OLDEST_THRESHOLD;
 
-static void tcp_wakehpts(struct tcp_hpts_entry *p);
-static void tcp_wakeinput(struct tcp_hpts_entry *p);
 static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
-static void tcp_hptsi(struct tcp_hpts_entry *hpts);
+static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
 static void tcp_hpts_thread(void *ctx);
 static void tcp_init_hptsi(void *st);
 
 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
-static int32_t tcp_hpts_callout_skip_swi = 0;
+static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
+static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
+static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
+
+
 
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
     "TCP Hpts controls");
+SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+    "TCP Hpts statistics");
 
 #define	timersub(tvp, uvp, vvp)						\
 	do {								\
@@ -230,44 +236,92 @@ struct hpts_domain_info {
 
 struct hpts_domain_info hpts_domains[MAXMEMDOM];
 
-SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
-    &tcp_hpts_precision, 120,
-    "Value for PRE() precision of callout");
-
 counter_u64_t hpts_hopelessly_behind;
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, hopeless, CTLFLAG_RD,
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
     &hpts_hopelessly_behind,
     "Number of times hpts could not catch up and was behind hopelessly");
 
 counter_u64_t hpts_loops;
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
     &hpts_loops, "Number of times hpts had to loop to catch up");
 
 counter_u64_t back_tosleep;
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
     &back_tosleep, "Number of times hpts found no tcbs");
 
 counter_u64_t combined_wheel_wrap;
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
     &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
 
 counter_u64_t wheel_wrap;
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, wheel_wrap, CTLFLAG_RD,
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
     &wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
 
-static int32_t out_ts_percision = 0;
+counter_u64_t hpts_direct_call;
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
+    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
+
+counter_u64_t hpts_wake_timeout;
+
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
+    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
+
+counter_u64_t hpts_direct_awakening;
+
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
+    &hpts_direct_awakening, "Number of times hpts threads woke up via the callout expiring");
 
-SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
-    &out_ts_percision, 0,
-    "Do we use a percise timestamp for every output cts");
+counter_u64_t hpts_back_tosleep;
+
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
+    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep no work");
+
+counter_u64_t cpu_uses_flowid;
+counter_u64_t cpu_uses_random;
+
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
+    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
+SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
+    &cpu_uses_random, "Number of times when setting cpuid we used the a random value");
+
+TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
+TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
+    &tcp_bind_threads, 2,
+    "Thread Binding tunable");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
+    &tcp_use_irq_cpu, 0,
+    "Use of irq CPU  tunable");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
+    &tcp_hpts_precision, 120,
+    "Value for PRE() precision of callout");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
+    &conn_cnt_thresh, 0,
+    "How many connections (below) make us use the callout based mechanism");
 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
     &hpts_does_tp_logging, 0,
     "Do we add to any tp that has logging on pacer logs");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_assigned_cpu, CTLFLAG_RW,
+    &hpts_use_assigned_cpu, 0,
+    "Do we start any hpts timer on the assigned cpu?");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_oldest, CTLFLAG_RW,
+    &hpts_uses_oldest, OLDEST_THRESHOLD,
+    "Do syscalls look for the hpts that has been the longest since running (or just use cpu no if 0)?");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
+    &dynamic_min_sleep, 250,
+    "What is the dynamic minsleep value?");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
+    &dynamic_max_sleep, 5000,
+    "What is the dynamic maxsleep value?");
+
+
+
+
 
 static int32_t max_pacer_loops = 10;
 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
@@ -287,7 +341,7 @@ sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
 	new = hpts_sleep_max;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
-		if ((new < (NUM_OF_HPTSI_SLOTS / 4)) ||
+		if ((new < dynamic_min_sleep) ||
 		    (new > HPTS_MAX_SLEEP_ALLOWED))
 			error = EINVAL;
 		else
@@ -296,26 +350,60 @@ sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
 	return (error);
 }
 
+static int
+sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+	uint32_t new;
+
+	new = tcp_min_hptsi_time;
+	error = sysctl_handle_int(oidp, &new, 0, req);
+	if (error == 0 && req->newptr) {
+		if (new < LOWEST_SLEEP_ALLOWED)
+			error = EINVAL;
+		else
+			tcp_min_hptsi_time = new;
+	}
+	return (error);
+}
+
 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     &hpts_sleep_max, 0,
     &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
     "Maximum time hpts will sleep");
 
-SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
+SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
+    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     &tcp_min_hptsi_time, 0,
+    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
     "The minimum time the hpts must sleep before processing more slots");
 
-SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
-    &tcp_hpts_callout_skip_swi, 0,
-    "Do we have the callout call directly to the hpts?");
+static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
+static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
+static int tcp_hpts_no_wake_over_thresh = 1;
+
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
+    &ticks_indicate_more_sleep, 0,
+    "If we only process this many or less on a timeout, we need longer sleep on the next callout");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
+    &ticks_indicate_less_sleep, 0,
+    "If we process this many or more on a timeout, we need less sleep on the next callout");
+SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
+    &tcp_hpts_no_wake_over_thresh, 0,
+    "When we are over the threshold on the pacer do we prohibit wakeups?");
 
 static void
 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
-	     int ticks_to_run, int idx)
+	     int slots_to_run, int idx, int from_callout)
 {
 	union tcp_log_stackspecific log;
-
+	/*
+	 * Unused logs are
+	 * 64 bit - delRate, rttProp, bw_inuse
+	 * 16 bit - cwnd_gain
+	 *  8 bit - bbr_state, bbr_substate, inhpts, ininput;
+	 */
 	memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 	log.u_bbr.flex1 = hpts->p_nxt_slot;
 	log.u_bbr.flex2 = hpts->p_cur_slot;
@@ -323,8 +411,9 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
 	log.u_bbr.flex4 = idx;
 	log.u_bbr.flex5 = hpts->p_curtick;
 	log.u_bbr.flex6 = hpts->p_on_queue_cnt;
-	log.u_bbr.use_lt_bw = 1;
-	log.u_bbr.inflight = ticks_to_run;
+	log.u_bbr.flex7 = hpts->p_cpu;
+	log.u_bbr.flex8 = (uint8_t)from_callout;
+	log.u_bbr.inflight = slots_to_run;
 	log.u_bbr.applimited = hpts->overidden_sleep;
 	log.u_bbr.delivered = hpts->saved_curtick;
 	log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
@@ -332,7 +421,9 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
 	log.u_bbr.lt_epoch = hpts->saved_prev_slot;
 	log.u_bbr.pkts_out = hpts->p_delayed_by;
 	log.u_bbr.lost = hpts->p_hpts_sleep_time;
-	log.u_bbr.cur_del_rate = hpts->p_runningtick;
+	log.u_bbr.pacing_gain = hpts->p_cpu;
+	log.u_bbr.pkt_epoch = hpts->p_runningslot;
+	log.u_bbr.use_lt_bw = 1;
 	TCP_LOG_EVENTP(tp, NULL,
 		       &tp->t_inpcb->inp_socket->so_rcv,
 		       &tp->t_inpcb->inp_socket->so_snd,
@@ -341,47 +432,40 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
 }
 
 static void
-hpts_timeout_swi(void *arg)
+tcp_wakehpts(struct tcp_hpts_entry *hpts)
 {
-	struct tcp_hpts_entry *hpts;
+	HPTS_MTX_ASSERT(hpts);
 
-	hpts = (struct tcp_hpts_entry *)arg;
-	swi_sched(hpts->ie_cookie, 0);
+	if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
+		hpts->p_direct_wake = 0;
+		return;
+	}
+	if (hpts->p_hpts_wake_scheduled == 0) {
+		hpts->p_hpts_wake_scheduled = 1;
+		swi_sched(hpts->ie_cookie, 0);
+	}
 }
 
 static void
-hpts_timeout_dir(void *arg)
+hpts_timeout_swi(void *arg)
 {
-	tcp_hpts_thread(arg);
+	struct tcp_hpts_entry *hpts;
+
+	hpts = (struct tcp_hpts_entry *)arg;
+	swi_sched(hpts->ie_cookie, 0);
 }
 
 static inline void
 hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
 {
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx) == 0) {
-		/* We don't own the mutex? */
-		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
-	}
-	if (hpts->p_cpu != inp->inp_hpts_cpu) {
-		/* It is not the right cpu/mutex? */
-		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
-	}
-	if (inp->inp_in_hpts == 0) {
-		/* We are not on the hpts? */
-		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
-	}
-#endif
+	HPTS_MTX_ASSERT(hpts);
+	KASSERT(hpts->p_cpu == inp->inp_hpts_cpu, ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
+	KASSERT(inp->inp_in_hpts != 0, ("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp));
 	TAILQ_REMOVE(head, inp, inp_hpts);
 	hpts->p_on_queue_cnt--;
-	if (hpts->p_on_queue_cnt < 0) {
-		/* Count should not go negative .. */
-#ifdef INVARIANTS
-		panic("Hpts goes negative inp:%p hpts:%p",
-		    inp, hpts);
-#endif
-		hpts->p_on_queue_cnt = 0;
-	}
+	KASSERT(hpts->p_on_queue_cnt >= 0,
+		("Hpts goes negative inp:%p hpts:%p",
+		 inp, hpts));
 	if (clear) {
 		inp->inp_hpts_request = 0;
 		inp->inp_in_hpts = 0;
@@ -391,20 +475,13 @@ hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hpt
 static inline void
 hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
 {
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx) == 0) {
-		/* We don't own the mutex? */
-		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
-	}
-	if (hpts->p_cpu != inp->inp_hpts_cpu) {
-		/* It is not the right cpu/mutex? */
-		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
-	}
-	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
-		/* We are already on the hpts? */
-		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
-	}
-#endif
+	HPTS_MTX_ASSERT(hpts);
+	KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
+		("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
+	KASSERT(((noref == 1) && (inp->inp_in_hpts == 1)) ||
+		((noref == 0) && (inp->inp_in_hpts == 0)),
+		("%s: hpts:%p inp:%p already on the hpts?",
+		 __FUNCTION__, hpts, inp));
 	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
 	inp->inp_in_hpts = 1;
 	hpts->p_on_queue_cnt++;
@@ -416,37 +493,20 @@ hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hpt
 static inline void
 hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
 {
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx) == 0) {
-		/* We don't own the mutex? */
-		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
-	}
-	if (hpts->p_cpu != inp->inp_input_cpu) {
-		/* It is not the right cpu/mutex? */
-		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
-	}
-	if (inp->inp_in_input == 0) {
-		/* We are not on the input hpts? */
-		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
-	}
-#endif
+	HPTS_MTX_ASSERT(hpts);
+	KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
+		("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
+	KASSERT(inp->inp_in_input != 0,
+		("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp));
 	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
 	hpts->p_on_inqueue_cnt--;
-	if (hpts->p_on_inqueue_cnt < 0) {
-#ifdef INVARIANTS
-		panic("Hpts in goes negative inp:%p hpts:%p",
-		    inp, hpts);
-#endif
-		hpts->p_on_inqueue_cnt = 0;
-	}
-#ifdef INVARIANTS
-	if (TAILQ_EMPTY(&hpts->p_input) &&
-	    (hpts->p_on_inqueue_cnt != 0)) {
-		/* We should not be empty with a queue count */
-		panic("%s hpts:%p in_hpts input empty but cnt:%d",
-		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
-	}
-#endif
+	KASSERT(hpts->p_on_inqueue_cnt >= 0,
+		("Hpts in goes negative inp:%p hpts:%p",
+		 inp, hpts));
+	KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
+		 ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
+		("%s hpts:%p input cnt (p_on_inqueue):%d and queue state mismatch",
+		 __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
 	if (clear)
 		inp->inp_in_input = 0;
 }
@@ -454,46 +514,17 @@ hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear
 static inline void
 hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
 {
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx) == 0) {
-		/* We don't own the mutex? */
-		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
-	}
-	if (hpts->p_cpu != inp->inp_input_cpu) {
-		/* It is not the right cpu/mutex? */
-		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
-	}
-	if (inp->inp_in_input == 1) {
-		/* We are already on the input hpts? */
-		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
-	}
-#endif
+	HPTS_MTX_ASSERT(hpts);
+	KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
+		("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
+	KASSERT(inp->inp_in_input == 0,
+		("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp));
 	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
 	inp->inp_in_input = 1;
 	hpts->p_on_inqueue_cnt++;
 	in_pcbref(inp);
 }
 
-static void
-tcp_wakehpts(struct tcp_hpts_entry *hpts)
-{
-	HPTS_MTX_ASSERT(hpts);
-	if (hpts->p_hpts_wake_scheduled == 0) {
-		hpts->p_hpts_wake_scheduled = 1;
-		swi_sched(hpts->ie_cookie, 0);
-	}
-}
-
-static void
-tcp_wakeinput(struct tcp_hpts_entry *hpts)
-{
-	HPTS_MTX_ASSERT(hpts);
-	if (hpts->p_hpts_wake_scheduled == 0) {
-		hpts->p_hpts_wake_scheduled = 1;
-		swi_sched(hpts->ie_cookie, 0);
-	}
-}
-
 struct tcp_hpts_entry *
 tcp_cur_hpts(struct inpcb *inp)
 {
@@ -514,12 +545,9 @@ tcp_hpts_lock(struct inpcb *inp)
 again:
 	hpts_num = inp->inp_hpts_cpu;
 	hpts = tcp_pace.rp_ent[hpts_num];
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx)) {
-		panic("Hpts:%p owns mtx prior-to lock line:%d",
-		    hpts, __LINE__);
-	}
-#endif
+	KASSERT(mtx_owned(&hpts->p_mtx) == 0,
+		("Hpts:%p owns mtx prior-to lock line:%d",
+		 hpts, __LINE__));
 	mtx_lock(&hpts->p_mtx);
 	if (hpts_num != inp->inp_hpts_cpu) {
 		mtx_unlock(&hpts->p_mtx);
@@ -537,12 +565,9 @@ tcp_input_lock(struct inpcb *inp)
 again:
 	hpts_num = inp->inp_input_cpu;
 	hpts = tcp_pace.rp_ent[hpts_num];
-#ifdef INVARIANTS
-	if (mtx_owned(&hpts->p_mtx)) {
-		panic("Hpts:%p owns mtx prior-to lock line:%d",
-		    hpts, __LINE__);
-	}
-#endif
+	KASSERT(mtx_owned(&hpts->p_mtx) == 0,
+		("Hpts:%p owns mtx prior-to lock line:%d",
+		hpts, __LINE__));
 	mtx_lock(&hpts->p_mtx);
 	if (hpts_num != inp->inp_input_cpu) {
 		mtx_unlock(&hpts->p_mtx);
@@ -555,6 +580,7 @@ static void
 tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
 {
 	int32_t add_freed;
+	int32_t ret;
 
 	if (inp->inp_flags2 & INP_FREED) {
 		/*
@@ -567,26 +593,11 @@ tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
 		add_freed = 0;
 	}
 #ifndef INP_REF_DEBUG
-	if (in_pcbrele_wlocked(inp)) {
-		/*
-		 * This should not happen. We have the inpcb referred to by
-		 * the main socket (why we are called) and the hpts. It
-		 * should always return 0.
-		 */
-		panic("inpcb:%p release ret 1",
-		    inp);
-	}
+	ret = in_pcbrele_wlocked(inp);
 #else
-	if (__in_pcbrele_wlocked(inp, line)) {
-		/*
-		 * This should not happen. We have the inpcb referred to by
-		 * the main socket (why we are called) and the hpts. It
-		 * should always return 0.
-		 */
-		panic("inpcb:%p release ret 1",
-		    inp);
-	}
+	ret = __in_pcbrele_wlocked(inp, line);
 #endif
+	KASSERT(ret != 1, ("inpcb:%p release ret 1", inp));
 	if (add_freed) {
 		inp->inp_flags2 |= INP_FREED;
 	}
@@ -642,73 +653,76 @@ __tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
 }
 
 static inline int
-hpts_tick(uint32_t wheel_tick, uint32_t plus)
+hpts_slot(uint32_t wheel_slot, uint32_t plus)
 {
 	/*
 	 * Given a slot on the wheel, what slot
 	 * is that plus ticks out?
 	 */
-	KASSERT(wheel_tick < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_tick));
-	return ((wheel_tick + plus) % NUM_OF_HPTSI_SLOTS);
+	KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
+	return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
 }
 
 static inline int
 tick_to_wheel(uint32_t cts_in_wticks)
 {
 	/*
-	 * Given a timestamp in wheel ticks (10usec inc's)
-	 * map it to our limited space wheel.
+	 * Given a timestamp in ticks (so by
+	 * default to get it to a real time one
+	 * would multiply by 10.. i.e the number
+	 * of ticks in a slot) map it to our limited
+	 * space wheel.
 	 */
 	return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
 }
 
 static inline int
-hpts_ticks_diff(int prev_tick, int tick_now)
+hpts_slots_diff(int prev_slot, int slot_now)
 {
 	/*
-	 * Given two ticks that are someplace
+	 * Given two slots that are someplace
 	 * on our wheel. How far are they apart?
 	 */
-	if (tick_now > prev_tick)
-		return (tick_now - prev_tick);
-	else if (tick_now == prev_tick)
+	if (slot_now > prev_slot)
+		return (slot_now - prev_slot);
+	else if (slot_now == prev_slot)
 		/*
 		 * Special case, same means we can go all of our
 		 * wheel less one slot.
 		 */
 		return (NUM_OF_HPTSI_SLOTS - 1);
 	else
-		return ((NUM_OF_HPTSI_SLOTS - prev_tick) + tick_now);
+		return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
 }
 
 /*
- * Given a tick on the wheel that is the current time
- * mapped to the wheel (wheel_tick), what is the maximum
+ * Given a slot on the wheel that is the current time
+ * mapped to the wheel (wheel_slot), what is the maximum
  * distance forward that can be obtained without
- * wrapping past either prev_tick or running_tick
+ * wrapping past either prev_slot or running_slot
  * depending on the htps state? Also if passed
- * a uint32_t *, fill it with the tick location.
+ * a uint32_t *, fill it with the slot location.
  *
  * Note if you do not give this function the current
- * time (that you think it is) mapped to the wheel
+ * time (that you think it is) mapped to the wheel slot
  * then the results will not be what you expect and
  * could lead to invalid inserts.
  */
 static inline int32_t
-max_ticks_available(struct tcp_hpts_entry *hpts, uint32_t wheel_tick, uint32_t *target_tick)
+max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
 {
-	uint32_t dis_to_travel, end_tick, pacer_to_now, avail_on_wheel;
+	uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
 
 	if ((hpts->p_hpts_active == 1) &&
 	    (hpts->p_wheel_complete == 0)) {
-		end_tick = hpts->p_runningtick;
+		end_slot = hpts->p_runningslot;
 		/* Back up one tick */
-		if (end_tick == 0)
-			end_tick = NUM_OF_HPTSI_SLOTS - 1;
+		if (end_slot == 0)
+			end_slot = NUM_OF_HPTSI_SLOTS - 1;
 		else
-			end_tick--;
-		if (target_tick)
-			*target_tick = end_tick;
+			end_slot--;
+		if (target_slot)
+			*target_slot = end_slot;
 	} else {
 		/*
 		 * For the case where we are
@@ -718,26 +732,26 @@ max_ticks_available(struct tcp_hpts_entry *hpts, uint32_t wheel_tick, uint32_t *
 		 * prev tick and subtract one from it. This puts us
 		 * as far out as possible on the wheel.
 		 */
-		end_tick = hpts->p_prev_slot;
-		if (end_tick == 0)
-			end_tick = NUM_OF_HPTSI_SLOTS - 1;
+		end_slot = hpts->p_prev_slot;
+		if (end_slot == 0)
+			end_slot = NUM_OF_HPTSI_SLOTS - 1;
 		else
-			end_tick--;
-		if (target_tick)
-			*target_tick = end_tick;
+			end_slot--;
+		if (target_slot)
+			*target_slot = end_slot;
 		/*
 		 * Now we have close to the full wheel left minus the
 		 * time it has been since the pacer went to sleep. Note
 		 * that wheel_tick, passed in, should be the current time
 		 * from the perspective of the caller, mapped to the wheel.
 		 */
-		if (hpts->p_prev_slot != wheel_tick)
-			dis_to_travel = hpts_ticks_diff(hpts->p_prev_slot, wheel_tick);
+		if (hpts->p_prev_slot != wheel_slot)
+			dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
 		else
 			dis_to_travel = 1;
 		/*
 		 * dis_to_travel in this case is the space from when the
-		 * pacer stopped (p_prev_slot) and where our wheel_tick
+		 * pacer stopped (p_prev_slot) and where our wheel_slot
 		 * is now. To know how many slots we can put it in we
 		 * subtract from the wheel size. We would not want
 		 * to place something after p_prev_slot or it will
@@ -746,21 +760,21 @@ max_ticks_available(struct tcp_hpts_entry *hpts, uint32_t wheel_tick, uint32_t *
 		return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
 	}
 	/*
-	 * So how many slots are open between p_runningtick -> p_cur_slot
+	 * So how many slots are open between p_runningslot -> p_cur_slot
 	 * that is what is currently un-available for insertion. Special
 	 * case when we are at the last slot, this gets 1, so that
 	 * the answer to how many slots are available is all but 1.
 	 */
-	if (hpts->p_runningtick == hpts->p_cur_slot)
+	if (hpts->p_runningslot == hpts->p_cur_slot)
 		dis_to_travel = 1;
 	else
-		dis_to_travel = hpts_ticks_diff(hpts->p_runningtick, hpts->p_cur_slot);
+		dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
 	/*
 	 * How long has the pacer been running?
 	 */
-	if (hpts->p_cur_slot != wheel_tick) {
+	if (hpts->p_cur_slot != wheel_slot) {
 		/* The pacer is a bit late */
-		pacer_to_now = hpts_ticks_diff(hpts->p_cur_slot, wheel_tick);
+		pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
 	} else {
 		/* The pacer is right on time, now == pacers start time */
 		pacer_to_now = 0;
@@ -774,24 +788,24 @@ max_ticks_available(struct tcp_hpts_entry *hpts, uint32_t wheel_tick, uint32_t *
 	/*
 	 * Now how many of those we will eat due to the pacer's
 	 * time (p_cur_slot) of start being behind the
-	 * real time (wheel_tick)?
+	 * real time (wheel_slot)?
 	 */
 	if (avail_on_wheel <= pacer_to_now) {
 		/*
 		 * Wheel wrap, we can't fit on the wheel, that
 		 * is unusual the system must be way overloaded!
-		 * Insert into the assured tick, and return special
+		 * Insert into the assured slot, and return special
 		 * "0".
 		 */
 		counter_u64_add(combined_wheel_wrap, 1);
-		*target_tick = hpts->p_nxt_slot;
+		*target_slot = hpts->p_nxt_slot;
 		return (0);
 	} else {
 		/*
 		 * We know how many slots are open
 		 * on the wheel (the reverse of what
 		 * is left to run. Take away the time
-		 * the pacer started to now (wheel_tick)
+		 * the pacer started to now (wheel_slot)
 		 * and that tells you how many slots are
 		 * open that can be inserted into that won't
 		 * be touched by the pacer until later.
@@ -815,7 +829,7 @@ tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpt
 			 * A sleeping hpts we want in next slot to run
 			 * note that in this state p_prev_slot == p_cur_slot
 			 */
-			inp->inp_hptsslot = hpts_tick(hpts->p_prev_slot, 1);
+			inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
 			if ((hpts->p_on_min_sleep == 0) && (hpts->p_hpts_active == 0))
 				need_wake = 1;
 		} else if ((void *)inp == hpts->p_inp) {
@@ -827,7 +841,7 @@ tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpt
 			 */
 			inp->inp_hptsslot = hpts->p_nxt_slot;
 		} else
-			inp->inp_hptsslot = hpts->p_runningtick;
+			inp->inp_hptsslot = hpts->p_runningslot;
 		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
 		if (need_wake) {
 			/*
@@ -862,9 +876,9 @@ check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uin
 	 * Sanity checks for the pacer with invariants
 	 * on insert.
 	 */
-	if (inp_hptsslot >= NUM_OF_HPTSI_SLOTS)
-		panic("hpts:%p inp:%p slot:%d > max",
-		      hpts, inp, inp_hptsslot);
+	KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
+		("hpts:%p inp:%p slot:%d > max",
+		 hpts, inp, inp_hptsslot));
 	if ((hpts->p_hpts_active) &&
 	    (hpts->p_wheel_complete == 0)) {
 		/*
@@ -875,17 +889,16 @@ check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uin
 		 */
 		int distance, yet_to_run;
 
-		distance = hpts_ticks_diff(hpts->p_runningtick, inp_hptsslot);
-		if (hpts->p_runningtick != hpts->p_cur_slot)
-			yet_to_run = hpts_ticks_diff(hpts->p_runningtick, hpts->p_cur_slot);
+		distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
+		if (hpts->p_runningslot != hpts->p_cur_slot)
+			yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
 		else
 			yet_to_run = 0;	/* processing last slot */
-		if (yet_to_run > distance) {
-			panic("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
-			      hpts, inp, inp_hptsslot,
-			      distance, yet_to_run,
-			      hpts->p_runningtick, hpts->p_cur_slot);
-		}
+		KASSERT(yet_to_run <= distance,
+			("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
+			 hpts, inp, inp_hptsslot,
+			 distance, yet_to_run,
+			 hpts->p_runningslot, hpts->p_cur_slot));
 	}
 }
 #endif
@@ -895,8 +908,9 @@ tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t
 		       struct hpts_diag *diag, struct timeval *tv)
 {
 	uint32_t need_new_to = 0;
-	uint32_t wheel_cts, last_tick;
-	int32_t wheel_tick, maxticks;
+	uint32_t wheel_cts; 
+	int32_t wheel_slot, maxslots, last_slot;
+	int cpu;
 	int8_t need_wakeup = 0;
 
 	HPTS_MTX_ASSERT(hpts);
@@ -904,7 +918,7 @@ tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t
 		memset(diag, 0, sizeof(struct hpts_diag));
 		diag->p_hpts_active = hpts->p_hpts_active;
 		diag->p_prev_slot = hpts->p_prev_slot;
-		diag->p_runningtick = hpts->p_runningtick;
+		diag->p_runningslot = hpts->p_runningslot;
 		diag->p_nxt_slot = hpts->p_nxt_slot;
 		diag->p_cur_slot = hpts->p_cur_slot;
 		diag->p_curtick = hpts->p_curtick;
@@ -913,131 +927,120 @@ tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t
 		diag->p_on_min_sleep = hpts->p_on_min_sleep;
 		diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
 	}
-	if (inp->inp_in_hpts == 0) {
-		if (slot == 0) {
-			/* Immediate */
-			tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
-			return;
-		}
-		/* Get the current time relative to the wheel */
-		wheel_cts = tcp_tv_to_hptstick(tv);
-		/* Map it onto the wheel */
-		wheel_tick = tick_to_wheel(wheel_cts);
-		/* Now what's the max we can place it at? */
-		maxticks = max_ticks_available(hpts, wheel_tick, &last_tick);
-		if (diag) {
-			diag->wheel_tick = wheel_tick;
-			diag->maxticks = maxticks;
-			diag->wheel_cts = wheel_cts;
+	KASSERT(inp->inp_in_hpts == 0, ("Hpts:%p tp:%p already on hpts and add?", hpts, inp));
+	if (slot == 0) {
+		/* Immediate */
+		tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
+		return;
+	}
+	/* Get the current time relative to the wheel */
+	wheel_cts = tcp_tv_to_hptstick(tv);
+	/* Map it onto the wheel */
+	wheel_slot = tick_to_wheel(wheel_cts);
+	/* Now what's the max we can place it at? */
+	maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
+	if (diag) {
+		diag->wheel_slot = wheel_slot;
+		diag->maxslots = maxslots;
+		diag->wheel_cts = wheel_cts;
+	}
+	if (maxslots == 0) {
+		/* The pacer is in a wheel wrap behind, yikes! */
+		if (slot > 1) {
+			/*
+			 * Reduce by 1 to prevent a forever loop in
+			 * case something else is wrong. Note this
+			 * probably does not hurt because the pacer
+			 * if its true is so far behind we will be
+			 * > 1second late calling anyway.
+			 */
+			slot--;
 		}
-		if (maxticks == 0) {
-			/* The pacer is in a wheel wrap behind, yikes! */
-			if (slot > 1) {
-				/*
-				 * Reduce by 1 to prevent a forever loop in
-				 * case something else is wrong. Note this
-				 * probably does not hurt because the pacer
-				 * if its true is so far behind we will be
-				 * > 1second late calling anyway.
-				 */
-				slot--;
-			}
-			inp->inp_hptsslot = last_tick;
-			inp->inp_hpts_request = slot;
-		} else 	if (maxticks >= slot) {
-			/* It all fits on the wheel */
-			inp->inp_hpts_request = 0;
-			inp->inp_hptsslot = hpts_tick(wheel_tick, slot);
-		} else {
-			/* It does not fit */
-			inp->inp_hpts_request = slot - maxticks;
-			inp->inp_hptsslot = last_tick;
+		inp->inp_hptsslot = last_slot;
+		inp->inp_hpts_request = slot;
+	} else 	if (maxslots >= slot) {
+		/* It all fits on the wheel */
+		inp->inp_hpts_request = 0;
+		inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
+	} else {
+		/* It does not fit */
+		inp->inp_hpts_request = slot - maxslots;
+		inp->inp_hptsslot = last_slot;
+	}
+	if (diag) {
+		diag->slot_remaining = inp->inp_hpts_request;
+		diag->inp_hptsslot = inp->inp_hptsslot;
+	}
+#ifdef INVARIANTS
+	check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
+#endif
+	hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, 0);
+	if ((hpts->p_hpts_active == 0) &&
+	    (inp->inp_hpts_request == 0) &&
+	    (hpts->p_on_min_sleep == 0)) {
+		/*
+		 * The hpts is sleeping and NOT on a minimum
+		 * sleep time, we need to figure out where
+		 * it will wake up at and if we need to reschedule
+		 * its time-out.
+		 */
+		uint32_t have_slept, yet_to_sleep;
+
+		/* Now do we need to restart the hpts's timer? */
+		have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
+		if (have_slept < hpts->p_hpts_sleep_time)
+			yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
+		else {
+			/* We are over-due */
+			yet_to_sleep = 0;
+			need_wakeup = 1;
 		}
 		if (diag) {
-			diag->slot_remaining = inp->inp_hpts_request;
-			diag->inp_hptsslot = inp->inp_hptsslot;
+			diag->have_slept = have_slept;
+			diag->yet_to_sleep = yet_to_sleep;
 		}
-#ifdef INVARIANTS
-		check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
-#endif
-		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, 0);
-		if ((hpts->p_hpts_active == 0) &&
-		    (inp->inp_hpts_request == 0) &&
-		    (hpts->p_on_min_sleep == 0)) {
+		if (yet_to_sleep &&
*** 1485 LINES SKIPPED ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?202107132038.16DKcxBB026393>