;
-        tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(slot), &diag);
+        tcp_hpts_insert(tp, pacing_delay, &diag);
         bbr->rc_timer_first = 0;
         bbr->bbr_timer_src = frm;
-        bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1);
+        bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 1);
         bbr_log_hpts_diag(bbr, cts, &diag);
     } else if (hpts_timeout) {
-        tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), &diag);
+        tcp_hpts_insert(tp, hpts_timeout, &diag);
         /*
-         * We add the flag here as well if the slot is set,
+         * We add the flag here as well if the pacing delay is set,
          * since hpts will call in to clear the queue first before
          * calling the output routine (which does our timers).
          * We don't want to set the flag if its just a timer
@@ -917,7 +917,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
          * on a keep-alive timer and a request comes in for
          * more data.
          */
-        if (slot)
+        if (pacing_delay)
             bbr->rc_pacer_started = cts;
         if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
             (bbr->rc_cwnd_limited == 0)) {
@@ -934,12 +934,12 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
                            TF2_DONT_SACK_QUEUE);
         }
         bbr->bbr_timer_src = frm;
-        bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
+        bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 0);
         bbr_log_hpts_diag(bbr, cts, &diag);
         bbr->rc_timer_first = 1;
     }
     bbr->rc_tmr_stopped = 0;
-    bbr_log_type_bbrsnd(bbr, tot_len, slot, delay_calc, cts, frm, prev_delay);
+    bbr_log_type_bbrsnd(bbr, tot_len, pacing_delay, delay_calc, cts, frm, prev_delay);
 }
 
 static void
@@ -1031,8 +1031,8 @@ bbr_timer_audit(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, struct sock
     }
     /*
      * Ok the timer originally started is not what we want now. We will
-     * force the hpts to be stopped if any, and restart with the slot
-     * set to what was in the saved slot.
+     * force the hpts to be stopped if any, and restart with the pacing
+     * delay set to what was in the saved delay.
      */
 wrong_timer:
     if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) {
@@ -2395,7 +2395,7 @@ bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag)
         log.u_bbr.flex2 = diag->p_cur_slot;
         log.u_bbr.flex3 = diag->slot_req;
         log.u_bbr.flex4 = diag->inp_hptsslot;
-        log.u_bbr.flex5 = diag->slot_remaining;
+        log.u_bbr.flex5 = diag->time_remaining;
         log.u_bbr.flex6 = diag->need_new_to;
         log.u_bbr.flex7 = diag->p_hpts_active;
         log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -2468,7 +2468,7 @@ bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
 }
 
 static void
-bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
 {
     if (tcp_bblogging_on(bbr->rc_tp)) {
         union tcp_log_stackspecific log;
@@ -2478,7 +2478,7 @@ bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, u
         log.u_bbr.flex1 = bbr->bbr_timer_src;
         log.u_bbr.flex2 = to;
         log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
-        log.u_bbr.flex4 = slot;
+        log.u_bbr.flex4 = pacing_delay;
         log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
         log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
         log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
@@ -2728,13 +2728,13 @@ bbr_type_log_hdwr_pacing(struct tcp_bbr *bbr, const struct ifnet *ifp,
 }
 
 static void
-bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
+bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t pacing_delay, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
 {
     if (tcp_bblogging_on(bbr->rc_tp)) {
         union tcp_log_stackspecific log;
 
         bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
-        log.u_bbr.flex1 = slot;
+        log.u_bbr.flex1 = pacing_delay;
         log.u_bbr.flex2 = del_by;
         log.u_bbr.flex3 = prev_delay;
         log.u_bbr.flex4 = line;
@@ -5200,7 +5200,7 @@ bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t
         left = bbr->r_ctl.rc_timer_exp - cts;
         ret = -3;
         bbr_log_to_processing(bbr, cts, ret, left, hpts_calling);
-        tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(left), NULL);
+        tcp_hpts_insert(tp, left, NULL);
         return (1);
     }
     bbr->rc_tmr_stopped = 0;
@@ -5249,7 +5249,7 @@ bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts)
         else
             time_since_send = 0;
         if (bbr->r_ctl.rc_last_delay_val > time_since_send) {
-            /* Cut down our slot time */
+            /* Cut down our pacing_delay time */
             bbr->r_ctl.rc_last_delay_val -= time_since_send;
         } else {
             bbr->r_ctl.rc_last_delay_val = 0;
@@ -5883,7 +5883,7 @@ bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t
      * sequence 1 for 10 bytes. In such an example the r_start would be
      * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
      * This means that r_end is actually the first sequence for the next
-     * slot (11).
+     * pacing delay (11).
      *
      */
     INP_WLOCK_ASSERT(tptoinpcb(tp));
@@ -11851,7 +11851,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
     struct bbr_sendmap *rsm = NULL;
     int32_t tso, mtu;
     struct tcpopt to;
-    int32_t slot = 0;
+    int32_t pacing_delay = 0;
     struct inpcb *inp;
     struct sockbuf *sb;
     bool hpts_calling;
@@ -11981,8 +11981,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
             delay_calc -= bbr->r_ctl.rc_last_delay_val;
         else {
             /*
-             * We are early setup to adjust
-             * our slot time.
+             * We are early setup to adjust out pacing delay.
             */
             uint64_t merged_val;
 
@@ -12099,7 +12098,7 @@ again:
 #endif
     error = 0;
     tso = 0;
-    slot = 0;
+    pacing_delay = 0;
     mtu = 0;
     sendwin = min(tp->snd_wnd, tp->snd_cwnd);
     sb_offset = tp->snd_max - tp->snd_una;
@@ -12121,7 +12120,7 @@ recheck_resend:
         tot_len = tp->t_maxseg;
         if (hpts_calling)
             /* Retry in a ms */
-            slot = 1001;
+            pacing_delay = 1001;
         goto just_return_nolock;
     }
     TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next);
@@ -12694,9 +12693,9 @@ just_return:
     SOCK_SENDBUF_UNLOCK(so);
just_return_nolock:
    if (tot_len)
-        slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+        pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
     if (bbr->rc_no_pacing)
-        slot = 0;
+        pacing_delay = 0;
     if (tot_len == 0) {
         if ((ctf_outstanding(tp) + min((bbr->r_ctl.rc_high_rwnd/2),
             bbr_minseg(bbr))) >= tp->snd_wnd) {
@@ -12746,7 +12745,7 @@ just_return_nolock:
     /* Dont update the time if we did not send */
     bbr->r_ctl.rc_last_delay_val = 0;
     bbr->rc_output_starts_timer = 1;
-    bbr_start_hpts_timer(bbr, tp, cts, 9, slot, tot_len);
+    bbr_start_hpts_timer(bbr, tp, cts, 9, pacing_delay, tot_len);
     bbr_log_type_just_return(bbr, cts, tot_len, hpts_calling, app_limited, p_maxseg, len);
     if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
         /* Make sure snd_nxt is drug up */
@@ -12782,7 +12781,7 @@ send:
             flags &= ~TH_FIN;
             if ((len == 0) && ((tp->t_flags & TF_ACKNOW) == 0)) {
                 /* Lets not send this */
-                slot = 0;
+                pacing_delay = 0;
                 goto just_return;
             }
         }
@@ -13048,7 +13047,7 @@ send:
             /*
              * We have outstanding data, don't send a fin by itself!.
              */
-            slot = 0;
+            pacing_delay = 0;
             goto just_return;
         }
         /*
@@ -13758,7 +13757,7 @@ nomore:
                 if (tp->snd_cwnd < maxseg)
                     tp->snd_cwnd = maxseg;
             }
-            slot = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
+            pacing_delay = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
             BBR_STAT_INC(bbr_saw_enobuf);
             if (bbr->bbr_hdrw_pacing)
                 counter_u64_add(bbr_hdwr_pacing_enobuf, 1);
@@ -13807,18 +13806,18 @@ nomore:
             }
             /*
              * Nuke all other things that can interfere
-             * with slot
+             * with pacing delay
             */
             if ((tot_len + len) &&
                 (len >= tp->t_maxseg)) {
-                slot = bbr_get_pacing_delay(bbr,
+                pacing_delay = bbr_get_pacing_delay(bbr,
                     bbr->r_ctl.rc_bbr_hptsi_gain, (tot_len + len), cts, 0);
-                if (slot < bbr_error_base_paceout)
-                    slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+                if (pacing_delay < bbr_error_base_paceout)
+                    pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
             } else
-                slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+                pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
             bbr->rc_output_starts_timer = 1;
-            bbr_start_hpts_timer(bbr, tp, cts, 10, slot,
+            bbr_start_hpts_timer(bbr, tp, cts, 10, pacing_delay,
                 tot_len);
             return (error);
         }
@@ -13836,9 +13835,9 @@ nomore:
         }
         /* FALLTHROUGH */
     default:
-        slot = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
+        pacing_delay = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
         bbr->rc_output_starts_timer = 1;
-        bbr_start_hpts_timer(bbr, tp, cts, 11, slot, 0);
+        bbr_start_hpts_timer(bbr, tp, cts, 11, pacing_delay, 0);
         return (error);
     }
 #ifdef STATS
@@ -13976,12 +13975,12 @@ skip_again:
     tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
     if (((flags & (TH_RST | TH_SYN | TH_FIN)) == 0) && tot_len) {
         /*
-         * Calculate/Re-Calculate the hptsi slot in usecs based on
+         * Calculate/Re-Calculate the hptsi timeout in usecs based on
          * what we have sent so far
          */
-        slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+        pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
         if (bbr->rc_no_pacing)
-            slot = 0;
+            pacing_delay = 0;
     }
     tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 enobufs:
@@ -13994,8 +13993,8 @@ enobufs:
         (more_to_rxt ||
          ((bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts)) != NULL))) {
         /* Rack cheats and shotguns out all rxt's 1ms apart */
-        if (slot > 1000)
-            slot = 1000;
+        if (pacing_delay > 1000)
+            pacing_delay = 1000;
     }
     if (bbr->bbr_hdrw_pacing && (bbr->hw_pacing_set == 0)) {
         /*
@@ -14009,7 +14008,7 @@ enobufs:
             tcp_bbr_tso_size_check(bbr, cts);
         }
     }
-    bbr_start_hpts_timer(bbr, tp, cts, 12, slot, tot_len);
+    bbr_start_hpts_timer(bbr, tp, cts, 12, pacing_delay, tot_len);
     if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
         /* Make sure snd_nxt is drug up */
         tp->snd_nxt = tp->snd_max;
@@ -14127,7 +14126,7 @@ bbr_switch_failed(struct tcpcb *tp)
         }
     } else
         toval = HPTS_USECS_PER_SLOT;
-    tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(toval), &diag);
+    tcp_hpts_insert(tp, toval, &diag);
     bbr_log_hpts_diag(bbr, cts, &diag);
 }
 
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index cff07b4932d9..c7962b57a69e 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -250,11 +250,11 @@ static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the co
 static int32_t rack_persist_min = 250000;      /* 250usec */
 static int32_t rack_persist_max = 2000000;     /* 2 Second in usec's */
 static int32_t rack_honors_hpts_min_to = 1;    /* Do we honor the hpts minimum time out for pacing timers */
-static uint32_t rack_max_reduce = 10;          /* Percent we can reduce slot by */
+static uint32_t rack_max_reduce = 10;          /* Percent we can reduce pacing delay by */
 static int32_t rack_sack_not_required = 1;     /* set to one to allow non-sack to use rack */
 static int32_t rack_limit_time_with_srtt = 0;
 static int32_t rack_autosndbuf_inc = 20;       /* In percentage form */
-static int32_t rack_enobuf_hw_boost_mult = 0;  /* How many times the hw rate we boost slot using time_between */
+static int32_t rack_enobuf_hw_boost_mult = 0;  /* How many times the hw rate we boost pacing delay using time_between */
 static int32_t rack_enobuf_hw_max = 12000;     /* 12 ms in usecs */
 static int32_t rack_enobuf_hw_min = 10000;     /* 10 ms in usecs */
 static int32_t rack_hw_rwnd_factor = 2;        /* How many max_segs the rwnd must be before we hold off sending */
@@ -278,7 +278,7 @@ static int32_t rack_hptsi_segments = 40;
 static int32_t rack_rate_sample_method = USE_RTT_LOW;
 static int32_t rack_pace_every_seg = 0;
 static int32_t rack_delayed_ack_time = 40000;  /* 40ms in usecs */
-static int32_t rack_slot_reduction = 4;
+static int32_t rack_pacing_delay_reduction = 4;
 static int32_t rack_wma_divisor = 8;           /* For WMA calculation */
 static int32_t rack_cwnd_block_ends_measure = 0;
 static int32_t rack_rwnd_block_ends_measure = 0;
@@ -478,7 +478,7 @@ rack_log_alt_to_to_cancel(struct tcp_rack *rack,
     uint16_t flex7, uint8_t mod);
 
 static void
-rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
+rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay,
     uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
     struct rack_sendmap *rsm, uint8_t quality);
 static struct rack_sendmap *
@@ -1107,7 +1107,7 @@ rack_init_sysctls(void)
     SYSCTL_ADD_S32(&rack_sysctl_ctx,
         SYSCTL_CHILDREN(rack_pacing),
         OID_AUTO, "burst_reduces", CTLFLAG_RW,
-        &rack_slot_reduction, 4,
+        &rack_pacing_delay_reduction, 4,
         "When doing only burst mitigation what is the reduce divisor");
     SYSCTL_ADD_S32(&rack_sysctl_ctx,
         SYSCTL_CHILDREN(rack_sysctl_root),
@@ -1399,7 +1399,7 @@ rack_init_sysctls(void)
         SYSCTL_CHILDREN(rack_timers),
         OID_AUTO, "hpts_max_reduce", CTLFLAG_RW,
         &rack_max_reduce, 10,
-        "Max percentage we will reduce slot by for pacing when we are behind");
+        "Max percentage we will reduce pacing delay by for pacing when we are behind");
     SYSCTL_ADD_U32(&rack_sysctl_ctx,
         SYSCTL_CHILDREN(rack_timers),
         OID_AUTO, "persmin", CTLFLAG_RW,
@@ -2700,7 +2700,7 @@ rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t
 }
 
 static void
-rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
 {
     if (tcp_bblogging_on(rack->rc_tp)) {
         union tcp_log_stackspecific log;
@@ -2710,7 +2710,7 @@ rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot
         log.u_bbr.flex1 = rack->rc_tp->t_srtt;
         log.u_bbr.flex2 = to;
         log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
-        log.u_bbr.flex4 = slot;
+        log.u_bbr.flex4 = pacing_delay;
         log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot;
         log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
         log.u_bbr.flex7 = rack->rc_in_persist;
@@ -3034,14 +3034,14 @@ rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,
 }
 
 static void
-rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
+rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, uint32_t cts, struct timeval *tv, int line)
 {
     if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
         union tcp_log_stackspecific log;
 
         memset(&log, 0, sizeof(log));
         log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
-        log.u_bbr.flex1 = slot;
+        log.u_bbr.flex1 = pacing_delay;
         if (rack->rack_no_prr)
             log.u_bbr.flex2 = 0;
         else
@@ -3139,7 +3139,7 @@ rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg
 }
 
 static void
-rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
+rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t pacing_delay,
     uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
 {
     if (tcp_bblogging_on(rack->rc_tp)) {
@@ -3148,7 +3148,7 @@ rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, ui
 
         memset(&log, 0, sizeof(log));
         log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
-        log.u_bbr.flex1 = slot;
+        log.u_bbr.flex1 = pacing_delay;
         log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
         log.u_bbr.flex4 = reason;
         if (rack->rack_no_prr)
@@ -6482,7 +6482,7 @@ rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
         log.u_bbr.flex2 = diag->p_cur_slot;
         log.u_bbr.flex3 = diag->slot_req;
         log.u_bbr.flex4 = diag->inp_hptsslot;
-        log.u_bbr.flex5 = diag->slot_remaining;
+        log.u_bbr.flex5 = diag->time_remaining;
         log.u_bbr.flex6 = diag->need_new_to;
         log.u_bbr.flex7 = diag->p_hpts_active;
         log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -6529,14 +6529,14 @@ rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uin
 
 static void
 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
-    int32_t slot, uint32_t tot_len_this_send, int sup_rack)
+    int32_t usecs, uint32_t tot_len_this_send, int sup_rack)
 {
     struct hpts_diag diag;
     struct inpcb *inp = tptoinpcb(tp);
     struct timeval tv;
     uint32_t delayed_ack = 0;
     uint32_t hpts_timeout;
-    uint32_t entry_slot = slot;
+    uint32_t entry_usecs = usecs;
     uint8_t stopped;
     uint32_t left = 0;
     uint32_t us_cts;
@@ -6557,7 +6557,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
     rack->r_ctl.rc_hpts_flags = 0;
     us_cts = tcp_get_usecs(&tv);
     /* Now early/late accounting */
-    rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
+    rack_log_pacing_delay_calc(rack, entry_usecs, usecs, 0, 0, 0, 26, __LINE__, NULL, 0);
     if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
         /*
          * We have a early carry over set,
@@ -6568,7 +6568,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
          * penalize the next timer for being awoke
          * by an ack aka the rc_agg_early (non-paced mode).
          */
-        slot += rack->r_ctl.rc_agg_early;
+        usecs += rack->r_ctl.rc_agg_early;
         rack->r_early = 0;
         rack->r_ctl.rc_agg_early = 0;
     }
@@ -6580,29 +6580,29 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
          * really depends on what
          * the current pacing time is.
          */
-        if (rack->r_ctl.rc_agg_delayed >= slot) {
+        if (rack->r_ctl.rc_agg_delayed >= usecs) {
             /*
              * We can't compensate for it all.
              * And we have to have some time
             * on the clock. We always have a min
-             * 10 slots (10 x 10 i.e. 100 usecs).
+             * 10 HPTS timer units (10 x 10 i.e. 100 usecs).
             */
-            if (slot <= HPTS_USECS_PER_SLOT) {
+            if (usecs <= HPTS_USECS_PER_SLOT) {
                 /* We gain delay */
-                rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - slot);
-                slot = HPTS_USECS_PER_SLOT;
+                rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - usecs);
+                usecs = HPTS_USECS_PER_SLOT;
             } else {
                 /* We take off some */
-                rack->r_ctl.rc_agg_delayed -= (slot - HPTS_USECS_PER_SLOT);
-                slot = HPTS_USECS_PER_SLOT;
+                rack->r_ctl.rc_agg_delayed -= (usecs - HPTS_USECS_PER_SLOT);
+                usecs = HPTS_USECS_PER_SLOT;
             }
         } else {
-            slot -= rack->r_ctl.rc_agg_delayed;
+            usecs -= rack->r_ctl.rc_agg_delayed;
             rack->r_ctl.rc_agg_delayed = 0;
             /* Make sure we have 100 useconds at minimum */
-            if (slot < HPTS_USECS_PER_SLOT) {
-                rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - slot;
-                slot = HPTS_USECS_PER_SLOT;
+            if (usecs < HPTS_USECS_PER_SLOT) {
+                rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - usecs;
+                usecs = HPTS_USECS_PER_SLOT;
             }
             if (rack->r_ctl.rc_agg_delayed == 0)
                 rack->r_late = 0;

*** 561 LINES SKIPPED ***
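
The hunks above all apply the same calling-convention change: the pacing value handed to tcp_hpts_insert() and the timer helpers is now expressed in microseconds at the call site, where callers previously converted it with HPTS_USEC_TO_SLOTS() first. As a minimal, standalone sketch of that unit change (not code from this commit; the macro values are assumptions for illustration, with HPTS_USECS_PER_SLOT taken as 10 usec from the "10 x 10 i.e. 100 usecs" comment in rack_start_hpts_timer() and the round-up in the mock HPTS_USEC_TO_SLOTS() being a stand-in rather than the kernel definition), the following userspace C program shows what an old-style caller versus a new-style caller would pass:

/*
 * Standalone illustration (not part of this commit): models the unit
 * change at the tcp_hpts_insert() call sites above.  Old callers
 * converted a microsecond pacing delay to HPTS slots themselves; new
 * callers pass the microsecond value straight through.  Macro values
 * are illustrative assumptions, not the kernel definitions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define HPTS_USECS_PER_SLOT    10
#define HPTS_USEC_TO_SLOTS(us) (((us) + HPTS_USECS_PER_SLOT - 1) / HPTS_USECS_PER_SLOT)

/* What an old-style caller handed to hpts: a slot count. */
static uint32_t
old_style_arg(uint32_t pacing_delay_us)
{
    return (HPTS_USEC_TO_SLOTS(pacing_delay_us));
}

/* What a new-style caller hands to hpts: the delay in microseconds. */
static uint32_t
new_style_arg(uint32_t pacing_delay_us)
{
    return (pacing_delay_us);
}

int
main(void)
{
    /* 1001 mirrors the "Retry in a ms" value used in bbr_output_wtime(). */
    uint32_t delays[] = { 1001, 250000, 12345 };

    for (size_t i = 0; i < sizeof(delays) / sizeof(delays[0]); i++)
        printf("%6u usec -> old arg %5u slots, new arg %6u usec\n",
            delays[i], old_style_arg(delays[i]), new_style_arg(delays[i]));
    return (0);
}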