Date: Thu, 5 Sep 2019 16:15:14 -0700 (PDT)
From: "Rodney W. Grimes" <freebsd@gndrsh.dnsmgr.net>
To: Warner Losh <imp@freebsd.org>
Cc: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: Re: svn commit: r351902 - in stable/12: lib/libefivar sbin/devmatch sbin/nvmecontrol sbin/nvmecontrol/modules/wdc share/man/man4 share/man/man9 stand/efi/libefi stand/efi/loader stand/forth stand/i386/...
Message-ID: <201909052315.x85NFElx027352@gndrsh.dnsmgr.net>
In-Reply-To: <201909052238.x85Mcs3R010976@repo.freebsd.org>

> Author: imp
> Date: Thu Sep 5 22:38:53 2019
> New Revision: 351902
> URL: https://svnweb.freebsd.org/changeset/base/351902
> 
> Log:
>   MFC r343755:
> 
>   Regularize the Netflix copyright

Thanks, but FYI, noted hunk of something else below...

> Modified: stable/12/sys/netinet/tcp_stacks/rack.c

Are you sure you wanted to do this to rack.c?

> ==============================================================================
> --- stable/12/sys/netinet/tcp_stacks/rack.c	Thu Sep 5 22:25:19 2019	(r351901)
> +++ stable/12/sys/netinet/tcp_stacks/rack.c	Thu Sep 5 22:38:53 2019	(r351902)
> @@ -1,5 +1,5 @@
>  /*-
> - * Copyright (c) 2016-2019 Netflix, Inc.
> + * Copyright (c) 2016-2018 Netflix, Inc.
>   *
>   * Redistribution and use in source and binary forms, with or without
>   * modification, are permitted provided that the following conditions
> @@ -202,7 +202,6 @@ static int32_t rack_always_send_oldest = 0;
>  static int32_t rack_sack_block_limit = 128;
>  static int32_t rack_use_sack_filter = 1;
>  static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
> -static uint32_t rack_map_split_limit = 0;	/* unlimited by default */
> 
>  /* Rack specific counters */
>  counter_u64_t rack_badfr;
> @@ -228,8 +227,6 @@ counter_u64_t rack_to_arm_tlp;
>  counter_u64_t rack_to_alloc;
>  counter_u64_t rack_to_alloc_hard;
>  counter_u64_t rack_to_alloc_emerg;
> -counter_u64_t rack_alloc_limited_conns;
> -counter_u64_t rack_split_limited;
> 
>  counter_u64_t rack_sack_proc_all;
>  counter_u64_t rack_sack_proc_short;
> @@ -263,8 +260,6 @@ static void
>  rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
>      struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
>  static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
> -static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
> -    uint8_t limit_type);
>  static struct rack_sendmap *
>  rack_check_recovery_mode(struct tcpcb *tp,
>      uint32_t tsused);
> @@ -449,8 +444,6 @@ sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
>  	counter_u64_zero(rack_sack_proc_short);
>  	counter_u64_zero(rack_sack_proc_restart);
>  	counter_u64_zero(rack_to_alloc);
> -	counter_u64_zero(rack_alloc_limited_conns);
> -	counter_u64_zero(rack_split_limited);
>  	counter_u64_zero(rack_find_high);
>  	counter_u64_zero(rack_runt_sacks);
>  	counter_u64_zero(rack_used_tlpmethod);
> @@ -628,11 +621,6 @@ rack_init_sysctls()
>  	    OID_AUTO, "pktdelay", CTLFLAG_RW,
>  	    &rack_pkt_delay, 1,
>  	    "Extra RACK time (in ms) besides reordering thresh");
> -	SYSCTL_ADD_U32(&rack_sysctl_ctx,
> -	    SYSCTL_CHILDREN(rack_sysctl_root),
> -	    OID_AUTO, "split_limit", CTLFLAG_RW,
> -	    &rack_map_split_limit, 0,
> -	    "Is there a limit on the number of map split entries (0=unlimited)");
>  	SYSCTL_ADD_S32(&rack_sysctl_ctx,
>  	    SYSCTL_CHILDREN(rack_sysctl_root),
>  	    OID_AUTO, "inc_var", CTLFLAG_RW,
> @@ -768,19 +756,7 @@ rack_init_sysctls()
>  	    SYSCTL_CHILDREN(rack_sysctl_root),
>  	    OID_AUTO, "allocemerg", CTLFLAG_RD,
>  	    &rack_to_alloc_emerg,
> -	    "Total allocations done from emergency cache");
> -	rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
> -	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
> -	    SYSCTL_CHILDREN(rack_sysctl_root),
> -	    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
> -	    &rack_alloc_limited_conns,
> -	    "Connections with allocations dropped due to limit");
> -	rack_split_limited = counter_u64_alloc(M_WAITOK);
> -	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
> -	    SYSCTL_CHILDREN(rack_sysctl_root),
> -	    OID_AUTO, "split_limited", CTLFLAG_RD,
> -	    &rack_split_limited,
> -	    "Split allocations dropped due to limit");
> +	    "Total alocations done from emergency cache");
>  	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
>  	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
>  	    SYSCTL_CHILDREN(rack_sysctl_root),
> @@ -1144,11 +1120,10 @@ rack_alloc(struct tcp_rack *rack)
>  {
>  	struct rack_sendmap *rsm;
> 
> +	counter_u64_add(rack_to_alloc, 1);
> +	rack->r_ctl.rc_num_maps_alloced++;
>  	rsm = uma_zalloc(rack_zone, M_NOWAIT);
>  	if (rsm) {
> -alloc_done:
> -		counter_u64_add(rack_to_alloc, 1);
> -		rack->r_ctl.rc_num_maps_alloced++;
>  		return (rsm);
>  	}
>  	if (rack->rc_free_cnt) {
> @@ -1156,46 +1131,14 @@ alloc_done:
>  		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
>  		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
>  		rack->rc_free_cnt--;
> -		goto alloc_done;
> +		return (rsm);
>  	}
>  	return (NULL);
>  }
> 
> -/* wrapper to allocate a sendmap entry, subject to a specific limit */
> -static struct rack_sendmap *
> -rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
> -{
> -	struct rack_sendmap *rsm;
> -
> -	if (limit_type) {
> -		/* currently there is only one limit type */
> -		if (rack_map_split_limit > 0 &&
> -		    rack->r_ctl.rc_num_split_allocs >= rack_map_split_limit) {
> -			counter_u64_add(rack_split_limited, 1);
> -			if (!rack->alloc_limit_reported) {
> -				rack->alloc_limit_reported = 1;
> -				counter_u64_add(rack_alloc_limited_conns, 1);
> -			}
> -			return (NULL);
> -		}
> -	}
> -
> -	/* allocate and mark in the limit type, if set */
> -	rsm = rack_alloc(rack);
> -	if (rsm != NULL && limit_type) {
> -		rsm->r_limit_type = limit_type;
> -		rack->r_ctl.rc_num_split_allocs++;
> -	}
> -	return (rsm);
> -}
> -
>  static void
>  rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
>  {
> -	if (rsm->r_limit_type) {
> -		/* currently there is only one limit type */
> -		rack->r_ctl.rc_num_split_allocs--;
> -	}
>  	rack->r_ctl.rc_num_maps_alloced--;
>  	if (rack->r_ctl.rc_tlpsend == rsm)
>  		rack->r_ctl.rc_tlpsend = NULL;
> @@ -1481,21 +1424,9 @@ rack_cc_after_idle(struct tcpcb *tp, int reduce_larges
> 
>  	if (tp->snd_cwnd == 1)
>  		i_cwnd = tp->t_maxseg;	/* SYN(-ACK) lost */
> -	else if (V_tcp_initcwnd_segments)
> -		i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg),
> -		    max(2 * tp->t_maxseg, V_tcp_initcwnd_segments * 1460));
> -	else if (V_tcp_do_rfc3390)
> -		i_cwnd = min(4 * tp->t_maxseg,
> -		    max(2 * tp->t_maxseg, 4380));
> -	else {
> -		/* Per RFC5681 Section 3.1 */
> -		if (tp->t_maxseg > 2190)
> -			i_cwnd = 2 * tp->t_maxseg;
> -		else if (tp->t_maxseg > 1095)
> -			i_cwnd = 3 * tp->t_maxseg;
> -		else
> -			i_cwnd = 4 * tp->t_maxseg;
> -	}
> +	else
> +		i_cwnd = tcp_compute_initwnd(tcp_maxseg(tp));
> +
>  	if (reduce_largest) {
>  		/*
>  		 * Do we reduce the largest cwnd to make
> @@ -1790,17 +1721,6 @@ rack_drop_checks(struct tcpopt *to, struct mbuf *m, st
>  			TCPSTAT_INC(tcps_rcvpartduppack);
>  			TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
>  		}
> -		/*
> -		 * DSACK - add SACK block for dropped range
> -		 */
> -		if (tp->t_flags & TF_SACK_PERMIT) {
> -			tcp_update_sack_list(tp, th->th_seq, th->th_seq + tlen);
> -			/*
> -			 * ACK now, as the next in-sequence segment
> -			 * will clear the DSACK block again
> -			 */
> -			tp->t_flags |= TF_ACKNOW;
> -		}
>  		*drop_hdrlen += todrop;	/* drop from the top afterwards */
>  		th->th_seq += todrop;
>  		tlen -= todrop;
> @@ -2949,7 +2869,7 @@ rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *ra
>  	TCPSTAT_INC(tcps_rexmttimeo);
>  	if ((tp->t_state == TCPS_SYN_SENT) ||
>  	    (tp->t_state == TCPS_SYN_RECEIVED))
> -		rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]);
> +		rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_syn_backoff[tp->t_rxtshift]);
>  	else
>  		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
>  	TCPT_RANGESET(tp->t_rxtcur, rexmt,
> @@ -4022,7 +3942,7 @@ do_rest_ofb:
>  		/*
>  		 * Need to split this in two pieces the before and after.
>  		 */
> -		nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
> +		nrsm = rack_alloc(rack);
>  		if (nrsm == NULL) {
>  			/*
>  			 * failed XXXrrs what can we do but loose the sack
> @@ -4083,7 +4003,7 @@ do_rest_ofb:
>  		goto do_rest_ofb;
>  	}
>  	/* Ok we need to split off this one at the tail */
> -	nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
> +	nrsm = rack_alloc(rack);
>  	if (nrsm == NULL) {
>  		/* failed rrs what can we do but loose the sack info? */
>  		goto out;
> @@ -4833,8 +4753,6 @@ dodata: /* XXX */
>  	if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
>  	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
>  		tcp_seq save_start = th->th_seq;
> -		tcp_seq save_rnxt = tp->rcv_nxt;
> -		int save_tlen = tlen;
> 
>  		m_adj(m, drop_hdrlen);	/* delayed header drop */
>  		/*
> @@ -4877,29 +4795,11 @@ dodata: /* XXX */
>  			 * m_adj() doesn't actually frees any mbufs when
>  			 * trimming from the head.
>  			 */
> -			tcp_seq temp = save_start;
> -			thflags = tcp_reass(tp, th, &temp, &tlen, m);
> +			thflags = tcp_reass(tp, th, &save_start, &tlen, m);
>  			tp->t_flags |= TF_ACKNOW;
>  		}
> -		if (((tlen == 0) && (save_tlen > 0) &&
> -		    (SEQ_LT(save_start, save_rnxt)))) {
> -			/*
> -			 * DSACK actually handled in the fastpath
> -			 * above.
> -			 */
> -			tcp_update_sack_list(tp, save_start, save_start + save_tlen);
> -		} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
> -			/*
> -			 * Cleaning sackblks by using zero length
> -			 * update.
> -			 */
> -			tcp_update_sack_list(tp, save_start, save_start);
> -		} else if ((tlen > 0) && (tlen >= save_tlen)) {
> -			/* Update of sackblks. */
> -			tcp_update_sack_list(tp, save_start, save_start + save_tlen);
> -		} else if (tlen > 0) {
> -			tcp_update_sack_list(tp, save_start, save_start+tlen);
> -		}
> +		if (tlen > 0)
> +			tcp_update_sack_list(tp, save_start, save_start + tlen);
>  	} else {
>  		m_freem(m);
>  		thflags &= ~TH_FIN;
> 
> Modified: stable/12/sys/netinet/tcp_stacks/rack_bbr_common.h
> ==============================================================================
...

-- 
Rod Grimes                                                 rgrimes@freebsd.org