Date: Thu, 21 Nov 2024 00:39:10 GMT
From: Gleb Smirnoff <glebius@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org,
    dev-commits-src-main@FreeBSD.org
Subject: git: 09000cc133d8 - main - tcp: mechanically rename hostcache metrics structure fields
Message-ID: <202411210039.4AL0dAGL044273@gitrepo.freebsd.org>
The branch main has been updated by glebius:

URL: https://cgit.FreeBSD.org/src/commit/?id=09000cc133d82fc614d9f61d064e3acf7fa8b875

commit 09000cc133d82fc614d9f61d064e3acf7fa8b875
Author:     Gleb Smirnoff <glebius@FreeBSD.org>
AuthorDate: 2024-11-21 00:28:15 +0000
Commit:     Gleb Smirnoff <glebius@FreeBSD.org>
CommitDate: 2024-11-21 00:29:00 +0000

    tcp: mechanically rename hostcache metrics structure fields

    Use the hc_ prefix instead of rmx_.  The latter stands for "route
    metrics" and is an artifact from the 1990s, when TCP caching was
    embedded into the routing table.  The rename should have happened
    back in 97d8d152c28bb.  No functional change.

    Done with the following sed(1) command:
    s/rmx_(mtu|ssthresh|rtt|rttvar|cwnd|sendpipe|recvpipe|granularity|expire|q|hits|updates)/hc_\1/g
---
 sys/netinet/tcp_hostcache.c | 186 ++++++++++++++++++++++----------------------
 sys/netinet/tcp_input.c     |  22 +++---
 sys/netinet/tcp_subr.c      |  12 +--
 sys/netinet/tcp_var.h       |  14 ++--
 4 files changed, 117 insertions(+), 117 deletions(-)

diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index a42ed8dc0d15..900c70b5cd14 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -99,23 +99,23 @@ struct hc_head {
 
 struct hc_metrics {
 	/* housekeeping */
-	CK_SLIST_ENTRY(hc_metrics) rmx_q;
+	CK_SLIST_ENTRY(hc_metrics) hc_q;
 	struct in_addr	ip4;		/* IP address */
 	struct in6_addr	ip6;		/* IP6 address */
 	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
 	/* endpoint specific values for tcp */
-	uint32_t	rmx_mtu;	/* MTU for this path */
-	uint32_t	rmx_ssthresh;	/* outbound gateway buffer limit */
-	uint32_t	rmx_rtt;	/* estimated round trip time */
-	uint32_t	rmx_rttvar;	/* estimated rtt variance */
-	uint32_t	rmx_cwnd;	/* congestion window */
-	uint32_t	rmx_sendpipe;	/* outbound delay-bandwidth product */
-	uint32_t	rmx_recvpipe;	/* inbound delay-bandwidth product */
+	uint32_t	hc_mtu;		/* MTU for this path */
+	uint32_t	hc_ssthresh;	/* outbound gateway buffer limit */
+	uint32_t	hc_rtt;		/* estimated round trip time */
+	uint32_t	hc_rttvar;	/* estimated rtt variance */
+	uint32_t	hc_cwnd;	/* congestion window */
+	uint32_t	hc_sendpipe;	/* outbound delay-bandwidth product */
+	uint32_t	hc_recvpipe;	/* inbound delay-bandwidth product */
 	/* TCP hostcache internal data */
-	int		rmx_expire;	/* lifetime for object */
+	int		hc_expire;	/* lifetime for object */
 #ifdef	TCP_HC_COUNTERS
-	u_long		rmx_hits;	/* number of hits */
-	u_long		rmx_updates;	/* number of updates */
+	u_long		hc_hits;	/* number of hits */
+	u_long		hc_updates;	/* number of updates */
 #endif
 };
 
@@ -347,17 +347,17 @@ tcp_hc_lookup(struct in_conninfo *inc)
 	 * Iterate through entries in bucket row looking for a match.
 	 */
 	smr_enter(V_tcp_hostcache.smr);
-	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q)
+	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q)
 		if (tcp_hc_cmp(hc_entry, inc))
 			break;
 
 	if (hc_entry != NULL) {
-		if (atomic_load_int(&hc_entry->rmx_expire) !=
+		if (atomic_load_int(&hc_entry->hc_expire) !=
 		    V_tcp_hostcache.expire)
-			atomic_store_int(&hc_entry->rmx_expire,
+			atomic_store_int(&hc_entry->hc_expire,
 			    V_tcp_hostcache.expire);
 #ifdef	TCP_HC_COUNTERS
-		hc_entry->rmx_hits++;
+		hc_entry->hc_hits++;
 #endif
 	} else
 		smr_exit(V_tcp_hostcache.smr);
@@ -393,13 +393,13 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
 		return;
 	}
 
-	hc_metrics_lite->rmx_mtu = atomic_load_32(&hc_entry->rmx_mtu);
-	hc_metrics_lite->rmx_ssthresh = atomic_load_32(&hc_entry->rmx_ssthresh);
-	hc_metrics_lite->rmx_rtt = atomic_load_32(&hc_entry->rmx_rtt);
-	hc_metrics_lite->rmx_rttvar = atomic_load_32(&hc_entry->rmx_rttvar);
-	hc_metrics_lite->rmx_cwnd = atomic_load_32(&hc_entry->rmx_cwnd);
-	hc_metrics_lite->rmx_sendpipe = atomic_load_32(&hc_entry->rmx_sendpipe);
-	hc_metrics_lite->rmx_recvpipe = atomic_load_32(&hc_entry->rmx_recvpipe);
+	hc_metrics_lite->hc_mtu = atomic_load_32(&hc_entry->hc_mtu);
+	hc_metrics_lite->hc_ssthresh = atomic_load_32(&hc_entry->hc_ssthresh);
+	hc_metrics_lite->hc_rtt = atomic_load_32(&hc_entry->hc_rtt);
+	hc_metrics_lite->hc_rttvar = atomic_load_32(&hc_entry->hc_rttvar);
+	hc_metrics_lite->hc_cwnd = atomic_load_32(&hc_entry->hc_cwnd);
+	hc_metrics_lite->hc_sendpipe = atomic_load_32(&hc_entry->hc_sendpipe);
+	hc_metrics_lite->hc_recvpipe = atomic_load_32(&hc_entry->hc_recvpipe);
 
 	smr_exit(V_tcp_hostcache.smr);
 }
@@ -423,7 +423,7 @@ tcp_hc_getmtu(struct in_conninfo *inc)
 		return (0);
 	}
 
-	mtu = atomic_load_32(&hc_entry->rmx_mtu);
+	mtu = atomic_load_32(&hc_entry->hc_mtu);
 	smr_exit(V_tcp_hostcache.smr);
 
 	return (mtu);
@@ -436,7 +436,7 @@ tcp_hc_getmtu(struct in_conninfo *inc)
 void
 tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
 {
-	struct hc_metrics_lite hcml = { .rmx_mtu = mtu };
+	struct hc_metrics_lite hcml = { .hc_mtu = mtu };
 
 	return (tcp_hc_update(inc, &hcml));
 }
@@ -460,20 +460,20 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 	hc_prev = NULL;
 
 	THC_LOCK(hc_head);
-	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
+	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q) {
 		if (tcp_hc_cmp(hc_entry, inc))
 			break;
-		if (CK_SLIST_NEXT(hc_entry, rmx_q) != NULL)
+		if (CK_SLIST_NEXT(hc_entry, hc_q) != NULL)
 			hc_prev = hc_entry;
 	}
 
 	if (hc_entry != NULL) {
-		if (atomic_load_int(&hc_entry->rmx_expire) !=
+		if (atomic_load_int(&hc_entry->hc_expire) !=
 		    V_tcp_hostcache.expire)
-			atomic_store_int(&hc_entry->rmx_expire,
+			atomic_store_int(&hc_entry->hc_expire,
 			    V_tcp_hostcache.expire);
 #ifdef	TCP_HC_COUNTERS
-		hc_entry->rmx_updates++;
+		hc_entry->hc_updates++;
 #endif
 		new = false;
 	} else {
@@ -491,18 +491,18 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 		    atomic_load_int(&V_tcp_hostcache.cache_count) >=
 		    V_tcp_hostcache.cache_limit) {
 			if (hc_prev != NULL) {
-				hc_entry = CK_SLIST_NEXT(hc_prev, rmx_q);
-				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
+				hc_entry = CK_SLIST_NEXT(hc_prev, hc_q);
+				KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL,
 				    ("%s: %p is not one to last",
 				    __func__, hc_prev));
-				CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
+				CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
 			} else if ((hc_entry =
 			    CK_SLIST_FIRST(&hc_head->hch_bucket)) != NULL) {
-				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
+				KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL,
 				    ("%s: %p is not the only element",
 				    __func__, hc_entry));
 				CK_SLIST_REMOVE_HEAD(&hc_head->hch_bucket,
-				    rmx_q);
+				    hc_q);
 			} else {
 				THC_UNLOCK(hc_head);
 				return;
@@ -535,7 +535,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 			hc_entry->ip6_zoneid = inc->inc6_zoneid;
 		} else
 			hc_entry->ip4 = inc->inc_faddr;
-		hc_entry->rmx_expire = V_tcp_hostcache.expire;
+		hc_entry->hc_expire = V_tcp_hostcache.expire;
 		new = true;
 	}
 
@@ -543,60 +543,60 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 	 * Fill in data.  Use atomics, since an existing entry is
 	 * accessible by readers in SMR section.
 	 */
-	if (hcml->rmx_mtu != 0) {
-		atomic_store_32(&hc_entry->rmx_mtu, hcml->rmx_mtu);
+	if (hcml->hc_mtu != 0) {
+		atomic_store_32(&hc_entry->hc_mtu, hcml->hc_mtu);
 	}
-	if (hcml->rmx_rtt != 0) {
-		if (hc_entry->rmx_rtt == 0)
-			v = hcml->rmx_rtt;
+	if (hcml->hc_rtt != 0) {
+		if (hc_entry->hc_rtt == 0)
+			v = hcml->hc_rtt;
 		else
-			v = ((uint64_t)hc_entry->rmx_rtt +
-			    (uint64_t)hcml->rmx_rtt) / 2;
-		atomic_store_32(&hc_entry->rmx_rtt, v);
+			v = ((uint64_t)hc_entry->hc_rtt +
+			    (uint64_t)hcml->hc_rtt) / 2;
+		atomic_store_32(&hc_entry->hc_rtt, v);
 		TCPSTAT_INC(tcps_cachedrtt);
 	}
-	if (hcml->rmx_rttvar != 0) {
-		if (hc_entry->rmx_rttvar == 0)
-			v = hcml->rmx_rttvar;
+	if (hcml->hc_rttvar != 0) {
+		if (hc_entry->hc_rttvar == 0)
+			v = hcml->hc_rttvar;
 		else
-			v = ((uint64_t)hc_entry->rmx_rttvar +
-			    (uint64_t)hcml->rmx_rttvar) / 2;
-		atomic_store_32(&hc_entry->rmx_rttvar, v);
+			v = ((uint64_t)hc_entry->hc_rttvar +
+			    (uint64_t)hcml->hc_rttvar) / 2;
+		atomic_store_32(&hc_entry->hc_rttvar, v);
 		TCPSTAT_INC(tcps_cachedrttvar);
 	}
-	if (hcml->rmx_ssthresh != 0) {
-		if (hc_entry->rmx_ssthresh == 0)
-			v = hcml->rmx_ssthresh;
+	if (hcml->hc_ssthresh != 0) {
+		if (hc_entry->hc_ssthresh == 0)
+			v = hcml->hc_ssthresh;
 		else
-			v = (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
-		atomic_store_32(&hc_entry->rmx_ssthresh, v);
+			v = (hc_entry->hc_ssthresh + hcml->hc_ssthresh) / 2;
+		atomic_store_32(&hc_entry->hc_ssthresh, v);
 		TCPSTAT_INC(tcps_cachedssthresh);
 	}
-	if (hcml->rmx_cwnd != 0) {
-		if (hc_entry->rmx_cwnd == 0)
-			v = hcml->rmx_cwnd;
+	if (hcml->hc_cwnd != 0) {
+		if (hc_entry->hc_cwnd == 0)
+			v = hcml->hc_cwnd;
 		else
-			v = ((uint64_t)hc_entry->rmx_cwnd +
-			    (uint64_t)hcml->rmx_cwnd) / 2;
-		atomic_store_32(&hc_entry->rmx_cwnd, v);
+			v = ((uint64_t)hc_entry->hc_cwnd +
+			    (uint64_t)hcml->hc_cwnd) / 2;
+		atomic_store_32(&hc_entry->hc_cwnd, v);
 		/* TCPSTAT_INC(tcps_cachedcwnd); */
 	}
-	if (hcml->rmx_sendpipe != 0) {
-		if (hc_entry->rmx_sendpipe == 0)
-			v = hcml->rmx_sendpipe;
+	if (hcml->hc_sendpipe != 0) {
+		if (hc_entry->hc_sendpipe == 0)
+			v = hcml->hc_sendpipe;
 		else
-			v = ((uint64_t)hc_entry->rmx_sendpipe +
-			    (uint64_t)hcml->rmx_sendpipe) /2;
-		atomic_store_32(&hc_entry->rmx_sendpipe, v);
+			v = ((uint64_t)hc_entry->hc_sendpipe +
+			    (uint64_t)hcml->hc_sendpipe) /2;
+		atomic_store_32(&hc_entry->hc_sendpipe, v);
 		/* TCPSTAT_INC(tcps_cachedsendpipe); */
 	}
-	if (hcml->rmx_recvpipe != 0) {
-		if (hc_entry->rmx_recvpipe == 0)
-			v = hcml->rmx_recvpipe;
+	if (hcml->hc_recvpipe != 0) {
+		if (hc_entry->hc_recvpipe == 0)
+			v = hcml->hc_recvpipe;
 		else
-			v = ((uint64_t)hc_entry->rmx_recvpipe +
-			    (uint64_t)hcml->rmx_recvpipe) /2;
-		atomic_store_32(&hc_entry->rmx_recvpipe, v);
+			v = ((uint64_t)hc_entry->hc_recvpipe +
+			    (uint64_t)hcml->hc_recvpipe) /2;
+		atomic_store_32(&hc_entry->hc_recvpipe, v);
 		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
 	}
 
@@ -604,17 +604,17 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 	 * Put it upfront.
 	 */
 	if (new) {
-		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
+		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q);
 		hc_head->hch_length++;
 		KASSERT(hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
 		    ("tcp_hostcache: bucket length too high at %p", hc_head));
 		atomic_add_int(&V_tcp_hostcache.cache_count, 1);
 		TCPSTAT_INC(tcps_hc_added);
 	} else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) {
-		KASSERT(CK_SLIST_NEXT(hc_prev, rmx_q) == hc_entry,
+		KASSERT(CK_SLIST_NEXT(hc_prev, hc_q) == hc_entry,
 		    ("%s: %p next is not %p", __func__, hc_prev, hc_entry));
-		CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
-		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
+		CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
+		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q);
 	}
 	THC_UNLOCK(hc_head);
 }
@@ -667,7 +667,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
 		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
 		CK_SLIST_FOREACH(hc_entry,
-		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q) {
+		    &V_tcp_hostcache.hashbase[i].hch_bucket, hc_q) {
 			sbuf_printf(&sb,
 			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u "
 #ifdef	TCP_HC_COUNTERS
@@ -681,20 +681,20 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 #else
 			    "IPv6?",
 #endif
-			    hc_entry->rmx_mtu,
-			    hc_entry->rmx_ssthresh,
-			    msec((u_long)hc_entry->rmx_rtt *
+			    hc_entry->hc_mtu,
+			    hc_entry->hc_ssthresh,
+			    msec((u_long)hc_entry->hc_rtt *
 			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
-			    msec((u_long)hc_entry->rmx_rttvar *
+			    msec((u_long)hc_entry->hc_rttvar *
 			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
-			    hc_entry->rmx_cwnd,
-			    hc_entry->rmx_sendpipe,
-			    hc_entry->rmx_recvpipe,
+			    hc_entry->hc_cwnd,
+			    hc_entry->hc_sendpipe,
+			    hc_entry->hc_recvpipe,
 #ifdef	TCP_HC_COUNTERS
-			    hc_entry->rmx_hits,
-			    hc_entry->rmx_updates,
+			    hc_entry->hc_hits,
+			    hc_entry->hc_updates,
 #endif
-			    hc_entry->rmx_expire);
+			    hc_entry->hc_expire);
 		}
 		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
 		sbuf_drain(&sb);
@@ -761,33 +761,33 @@ tcp_hc_purge_internal(int all)
 		head = &V_tcp_hostcache.hashbase[i];
 		hc_prev = NULL;
 		THC_LOCK(head);
-		CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, rmx_q,
+		CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, hc_q,
 		    hc_next) {
 			KASSERT(head->hch_length > 0 && head->hch_length <=
 			    V_tcp_hostcache.bucket_limit, ("tcp_hostcache: "
 			    "bucket length out of range at %u: %u", i,
 			    head->hch_length));
 			if (all ||
-			    atomic_load_int(&hc_entry->rmx_expire) <= 0) {
+			    atomic_load_int(&hc_entry->hc_expire) <= 0) {
 				if (hc_prev != NULL) {
 					KASSERT(hc_entry ==
-					    CK_SLIST_NEXT(hc_prev, rmx_q),
+					    CK_SLIST_NEXT(hc_prev, hc_q),
 					    ("%s: %p is not next to %p",
 					    __func__, hc_entry, hc_prev));
-					CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
+					CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
 				} else {
 					KASSERT(hc_entry ==
 					    CK_SLIST_FIRST(&head->hch_bucket),
 					    ("%s: %p is not first",
 					    __func__, hc_entry));
 					CK_SLIST_REMOVE_HEAD(&head->hch_bucket,
-					    rmx_q);
+					    hc_q);
 				}
 				uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
 				head->hch_length--;
 				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
 			} else {
-				atomic_subtract_int(&hc_entry->rmx_expire,
+				atomic_subtract_int(&hc_entry->hc_expire,
 				    V_tcp_hostcache.prune);
 				hc_prev = hc_entry;
 			}
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 7ef480d949e4..f9ecdf93cc47 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -368,11 +368,11 @@ cc_conn_init(struct tcpcb *tp)
 	tcp_hc_get(&inp->inp_inc, &metrics);
 	maxseg = tcp_maxseg(tp);
 
-	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
+	if (tp->t_srtt == 0 && (rtt = metrics.hc_rtt)) {
 		tp->t_srtt = rtt;
 		TCPSTAT_INC(tcps_usedrtt);
-		if (metrics.rmx_rttvar) {
-			tp->t_rttvar = metrics.rmx_rttvar;
+		if (metrics.hc_rttvar) {
+			tp->t_rttvar = metrics.hc_rttvar;
 			TCPSTAT_INC(tcps_usedrttvar);
 		} else {
 			/* default variation is +- 1 rtt */
@@ -383,14 +383,14 @@ cc_conn_init(struct tcpcb *tp)
 		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
 		    tp->t_rttmin, TCPTV_REXMTMAX);
 	}
-	if (metrics.rmx_ssthresh) {
+	if (metrics.hc_ssthresh) {
 		/*
 		 * There's some sort of gateway or interface
 		 * buffer limit on the path.  Use this to set
 		 * the slow start threshold, but set the
 		 * threshold to no less than 2*mss.
 		 */
-		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
+		tp->snd_ssthresh = max(2 * maxseg, metrics.hc_ssthresh);
 		TCPSTAT_INC(tcps_usedssthresh);
 	}
@@ -3888,8 +3888,8 @@ tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
 	 * If there's a discovered mtu in tcp hostcache, use it.
 	 * Else, use the link mtu.
 	 */
-	if (metrics.rmx_mtu)
-		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
+	if (metrics.hc_mtu)
+		mss = min(metrics.hc_mtu, maxmtu) - min_protoh;
 	else {
 #ifdef INET6
 		if (isipv6) {
@@ -3981,8 +3981,8 @@ tcp_mss(struct tcpcb *tp, int offer)
 	 */
 	so = inp->inp_socket;
 	SOCK_SENDBUF_LOCK(so);
-	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
-		bufsize = metrics.rmx_sendpipe;
+	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.hc_sendpipe)
+		bufsize = metrics.hc_sendpipe;
 	else
 		bufsize = so->so_snd.sb_hiwat;
 	if (bufsize < mss)
@@ -4016,8 +4016,8 @@ tcp_mss(struct tcpcb *tp, int offer)
 	}
 
 	SOCK_RECVBUF_LOCK(so);
-	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
-		bufsize = metrics.rmx_recvpipe;
+	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.hc_recvpipe)
+		bufsize = metrics.hc_recvpipe;
 	else
 		bufsize = so->so_rcv.sb_hiwat;
 	if (bufsize > mss) {
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 872ea6249051..c30b3aaf76c3 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -2507,13 +2507,13 @@ tcp_discardcb(struct tcpcb *tp)
 			);
 		} else
 			ssthresh = 0;
-		metrics.rmx_ssthresh = ssthresh;
+		metrics.hc_ssthresh = ssthresh;
 
-		metrics.rmx_rtt = tp->t_srtt;
-		metrics.rmx_rttvar = tp->t_rttvar;
-		metrics.rmx_cwnd = tp->snd_cwnd;
-		metrics.rmx_sendpipe = 0;
-		metrics.rmx_recvpipe = 0;
+		metrics.hc_rtt = tp->t_srtt;
+		metrics.hc_rttvar = tp->t_rttvar;
+		metrics.hc_cwnd = tp->snd_cwnd;
+		metrics.hc_sendpipe = 0;
+		metrics.hc_recvpipe = 0;
 
 		tcp_hc_update(&inp->inp_inc, &metrics);
 	}
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index 9f28e8247e3e..17c39bd93287 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -892,13 +892,13 @@ struct tcpopt {
 #define	TO_SYN		0x01		/* parse SYN-only options */
 
 struct hc_metrics_lite {	/* must stay in sync with hc_metrics */
-	uint32_t	rmx_mtu;	/* MTU for this path */
-	uint32_t	rmx_ssthresh;	/* outbound gateway buffer limit */
-	uint32_t	rmx_rtt;	/* estimated round trip time */
-	uint32_t	rmx_rttvar;	/* estimated rtt variance */
-	uint32_t	rmx_cwnd;	/* congestion window */
-	uint32_t	rmx_sendpipe;	/* outbound delay-bandwidth product */
-	uint32_t	rmx_recvpipe;	/* inbound delay-bandwidth product */
+	uint32_t	hc_mtu;		/* MTU for this path */
+	uint32_t	hc_ssthresh;	/* outbound gateway buffer limit */
+	uint32_t	hc_rtt;		/* estimated round trip time */
+	uint32_t	hc_rttvar;	/* estimated rtt variance */
+	uint32_t	hc_cwnd;	/* congestion window */
+	uint32_t	hc_sendpipe;	/* outbound delay-bandwidth product */
+	uint32_t	hc_recvpipe;	/* inbound delay-bandwidth product */
 };
 
 #ifndef _NETINET_IN_PCB_H_
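The rename can be reproduced or audited from the sed expression in the
commit message. A minimal sketch of a full invocation, assuming FreeBSD
sed(1) (whose -i flag takes a backup-suffix argument and whose -E flag
enables the extended regular expressions the alternation requires),
applied to the four files from the diffstat:

    sed -i '' -E \
        's/rmx_(mtu|ssthresh|rtt|rttvar|cwnd|sendpipe|recvpipe|granularity|expire|q|hits|updates)/hc_\1/g' \
        sys/netinet/tcp_hostcache.c sys/netinet/tcp_input.c \
        sys/netinet/tcp_subr.c sys/netinet/tcp_var.h

As a follow-up check, the same alternation can be fed to grep(1); after
the rename it should match nothing in these four files:

    grep -nE 'rmx_(mtu|ssthresh|rtt|rttvar|cwnd|sendpipe|recvpipe|granularity|expire|q|hits|updates)' \
        sys/netinet/tcp_hostcache.c sys/netinet/tcp_input.c \
        sys/netinet/tcp_subr.c sys/netinet/tcp_var.h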
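For context on why the diff carefully preserves the atomic loads and
stores around the renamed fields: readers walk a hash bucket inside an
SMR read section without taking the bucket lock, while writers update
entries in place under THC_LOCK(). A condensed sketch of the reader
side under the new names, adapted from tcp_hc_lookup()/tcp_hc_getmtu()
in the diff above (kernel context assumed; not a standalone program):

	uint32_t mtu = 0;

	/*
	 * Lockless lookup: the SMR read section keeps a concurrently
	 * removed entry from being freed while we dereference it.
	 */
	smr_enter(V_tcp_hostcache.smr);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q)
		if (tcp_hc_cmp(hc_entry, inc))
			break;
	if (hc_entry != NULL)
		/*
		 * The 32-bit atomic load pairs with the atomic_store_32()
		 * calls in tcp_hc_update(), so a reader never observes a
		 * torn value while a writer updates the entry in place.
		 */
		mtu = atomic_load_32(&hc_entry->hc_mtu);
	smr_exit(V_tcp_hostcache.smr);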