Date: Fri, 13 Sep 2019 01:12:17 +0000 (UTC)
From: Navdeep Parhar <np@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r352271 - in stable/12/sys/dev/cxgbe: . cxgbei iw_cxgbe tom
Message-ID: <201909130112.x8D1CHEZ041114@repo.freebsd.org>
Author: np
Date: Fri Sep 13 01:12:17 2019
New Revision: 352271
URL: https://svnweb.freebsd.org/changeset/base/352271

Log:
  MFC r351540, r351590

  r351540:
  cxgbe/t4_tom: Initialize all TOE connection parameters in one place.

  Remove now-redundant items from toepcb and synq_entry and the code to
  support them.  Let the driver calculate tx_align, rx_coalesce, and
  sndbuf by default.

  Reviewed by:    jhb@
  Sponsored by:   Chelsio Communications
  Differential Revision:  https://reviews.freebsd.org/D21387

  r351590:
  cxgbe/t4_tom: Use the correct value of sndbuf in AIO Tx.

  This should have been part of r351540.

  Sponsored by:   Chelsio Communications

Modified:
  stable/12/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
  stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c
  stable/12/sys/dev/cxgbe/t4_main.c
  stable/12/sys/dev/cxgbe/tom/t4_connect.c
  stable/12/sys/dev/cxgbe/tom/t4_cpl_io.c
  stable/12/sys/dev/cxgbe/tom/t4_ddp.c
  stable/12/sys/dev/cxgbe/tom/t4_listen.c
  stable/12/sys/dev/cxgbe/tom/t4_tls.c
  stable/12/sys/dev/cxgbe/tom/t4_tom.c
  stable/12/sys/dev/cxgbe/tom/t4_tom.h

Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
==============================================================================
--- stable/12/sys/dev/cxgbe/cxgbei/icl_cxgbei.c  Fri Sep 13 01:07:19 2019  (r352270)
+++ stable/12/sys/dev/cxgbe/cxgbei/icl_cxgbei.c  Fri Sep 13 01:12:17 2019  (r352271)
@@ -696,7 +696,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
     ISCSI_DATA_DIGEST_SIZE;
 }
 so->so_options |= SO_NO_DDP;
- toep->ulp_mode = ULP_MODE_ISCSI;
+ toep->params.ulp_mode = ULP_MODE_ISCSI;
 toep->ulpcb = icc;
 
 send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);

Modified: stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c  Fri Sep 13 01:07:19 2019  (r352270)
+++ stable/12/sys/dev/cxgbe/iw_cxgbe/qp.c  Fri Sep 13 01:12:17 2019  (r352271)
@@ -1415,7 +1415,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw
 ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
     qhp->wq.sq.qid, ep->com.so, __func__);
 
- toep->ulp_mode = ULP_MODE_RDMA;
+ toep->params.ulp_mode = ULP_MODE_RDMA;
 free_ird(rhp, qhp->attr.max_ird);
 
 return ret;

Modified: stable/12/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/12/sys/dev/cxgbe/t4_main.c  Fri Sep 13 01:07:19 2019  (r352270)
+++ stable/12/sys/dev/cxgbe/t4_main.c  Fri Sep 13 01:12:17 2019  (r352271)
@@ -6231,15 +6231,15 @@ t4_sysctls(struct adapter *sc)
     "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
     "3 = highspeed)");
 
- sc->tt.sndbuf = 256 * 1024;
+ sc->tt.sndbuf = -1;
 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
-     &sc->tt.sndbuf, 0, "max hardware send buffer size");
+     &sc->tt.sndbuf, 0, "hardware send buffer");
 
 sc->tt.ddp = 0;
 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
     &sc->tt.ddp, 0, "DDP allowed");
 
- sc->tt.rx_coalesce = 1;
+ sc->tt.rx_coalesce = -1;
 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", CTLFLAG_RW,
     &sc->tt.rx_coalesce, 0, "receive coalescing");
@@ -6251,7 +6251,7 @@ t4_sysctls(struct adapter *sc)
     CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
     "I", "TCP ports that use inline TLS+TOE RX");
 
- sc->tt.tx_align = 1;
+ sc->tt.tx_align = -1;
 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW,
     &sc->tt.tx_align, 0, "chop and align payload");

Modified: stable/12/sys/dev/cxgbe/tom/t4_connect.c
============================================================================== --- stable/12/sys/dev/cxgbe/tom/t4_connect.c Fri Sep 13 01:07:19 2019 (r352270) +++ stable/12/sys/dev/cxgbe/tom/t4_connect.c Fri Sep 13 01:12:17 2019 (r352271) @@ -102,7 +102,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_h make_established(toep, be32toh(cpl->snd_isn) - 1, be32toh(cpl->rcv_isn) - 1, cpl->tcp_opt); - if (toep->ulp_mode == ULP_MODE_TLS) + if (ulp_mode(toep) == ULP_MODE_TLS) tls_establish(toep); done: @@ -165,96 +165,6 @@ do_act_open_rpl(struct sge_iq *iq, const struct rss_he return (0); } -/* - * Options2 for active open. - */ -static uint32_t -calc_opt2a(struct socket *so, struct toepcb *toep, - const struct offload_settings *s) -{ - struct tcpcb *tp = so_sototcpcb(so); - struct port_info *pi = toep->vi->pi; - struct adapter *sc = pi->adapter; - uint32_t opt2 = 0; - - /* - * rx flow control, rx coalesce, congestion control, and tx pace are all - * explicitly set by the driver. On T5+ the ISS is also set by the - * driver to the value picked by the kernel. - */ - if (is_t4(sc)) { - opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID; - opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID; - } else { - opt2 |= F_T5_OPT_2_VALID; /* all 4 valid */ - opt2 |= F_T5_ISS; /* ISS provided in CPL */ - } - - if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT))) - opt2 |= F_SACK_EN; - - if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP))) - opt2 |= F_TSTAMPS_EN; - - if (tp->t_flags & TF_REQ_SCALE) - opt2 |= F_WND_SCALE_EN; - - if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1)) - opt2 |= F_CCTRL_ECN; - - /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */ - - opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]); - - /* These defaults are subject to ULP specific fixups later. */ - opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0); - - opt2 |= V_PACE(0); - - if (s->cong_algo >= 0) - opt2 |= V_CONG_CNTRL(s->cong_algo); - else if (sc->tt.cong_algorithm >= 0) - opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL); - else { - struct cc_algo *cc = CC_ALGO(tp); - - if (strcasecmp(cc->name, "reno") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_RENO); - else if (strcasecmp(cc->name, "tahoe") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); - if (strcasecmp(cc->name, "newreno") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); - if (strcasecmp(cc->name, "highspeed") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED); - else { - /* - * Use newreno in case the algorithm selected by the - * host stack is not supported by the hardware. - */ - opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); - } - } - - if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce)) - opt2 |= V_RX_COALESCE(M_RX_COALESCE); - - /* Note that ofld_rxq is already set according to s->rxq. 
*/ - opt2 |= F_RSS_QUEUE_VALID; - opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id); - -#ifdef USE_DDP_RX_FLOW_CONTROL - if (toep->ulp_mode == ULP_MODE_TCPDDP) - opt2 |= F_RX_FC_DDP; -#endif - - if (toep->ulp_mode == ULP_MODE_TLS) { - opt2 &= ~V_RX_COALESCE(M_RX_COALESCE); - opt2 |= F_RX_FC_DISABLE; - } - - return (htobe32(opt2)); -} - void t4_init_connect_cpl_handlers(void) { @@ -322,7 +232,7 @@ t4_connect(struct toedev *tod, struct socket *so, stru struct wrqe *wr = NULL; struct ifnet *rt_ifp = rt->rt_ifp; struct vi_info *vi; - int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid; + int qid_atid, rc, isipv6; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); int reason; @@ -353,18 +263,7 @@ t4_connect(struct toedev *tod, struct socket *so, stru if (!settings.offload) DONT_OFFLOAD_ACTIVE_OPEN(EPERM); - if (settings.txq >= 0 && settings.txq < vi->nofldtxq) - txqid = settings.txq; - else - txqid = arc4random() % vi->nofldtxq; - txqid += vi->first_ofld_txq; - if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq) - rxqid = settings.rxq; - else - rxqid = arc4random() % vi->nofldrxq; - rxqid += vi->first_ofld_rxq; - - toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO); + toep = alloc_toepcb(vi, M_NOWAIT); if (toep == NULL) DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM); @@ -377,27 +276,16 @@ t4_connect(struct toedev *tod, struct socket *so, stru if (toep->l2te == NULL) DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM); + toep->vnet = so->so_vnet; + init_conn_params(vi, &settings, &inp->inp_inc, so, NULL, + toep->l2te->idx, &toep->params); + init_toepcb(vi, toep); + isipv6 = nam->sa_family == AF_INET6; wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq); if (wr == NULL) DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM); - toep->vnet = so->so_vnet; - set_ulp_mode(toep, select_ulp_mode(so, sc, &settings)); - SOCKBUF_LOCK(&so->so_rcv); - toep->opt0_rcv_bufsize = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ); - SOCKBUF_UNLOCK(&so->so_rcv); - - /* - * The kernel sets request_r_scale based on sb_max whereas we need to - * take hardware's MAX_RCV_WND into account too. This is normally a - * no-op as MAX_RCV_WND is much larger than the default sb_max. 
- */ - if (tp->t_flags & TF_REQ_SCALE) - rscale = tp->request_r_scale = select_rcv_wscale(); - else - rscale = 0; - mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings); qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) | V_TID_COOKIE(CPL_COOKIE_TOM); @@ -438,9 +326,13 @@ t4_connect(struct toedev *tod, struct socket *so, stru cpl->peer_port = inp->inp_fport; cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0]; cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8]; - cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale, - toep->opt0_rcv_bufsize, toep->ulp_mode, &settings); - cpl->opt2 = calc_opt2a(so, toep, &settings); + cpl->opt0 = calc_options0(vi, &toep->params); + cpl->opt2 = calc_options2(vi, &toep->params); + + CTR6(KTR_CXGBE, + "%s: atid %u, toep %p, inp %p, opt0 %#016lx, opt2 %#08x", + __func__, toep->tid, toep, inp, be64toh(cpl->opt0), + be32toh(cpl->opt2)); } else { struct cpl_act_open_req *cpl = wrtod(wr); struct cpl_t5_act_open_req *cpl5 = (void *)cpl; @@ -467,13 +359,14 @@ t4_connect(struct toedev *tod, struct socket *so, stru qid_atid)); inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port, &cpl->peer_ip, &cpl->peer_port); - cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale, - toep->opt0_rcv_bufsize, toep->ulp_mode, &settings); - cpl->opt2 = calc_opt2a(so, toep, &settings); - } + cpl->opt0 = calc_options0(vi, &toep->params); + cpl->opt2 = calc_options2(vi, &toep->params); - CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__, - toep->tid, tcpstates[tp->t_state], toep, inp); + CTR6(KTR_CXGBE, + "%s: atid %u, toep %p, inp %p, opt0 %#016lx, opt2 %#08x", + __func__, toep->tid, toep, inp, be64toh(cpl->opt0), + be32toh(cpl->opt2)); + } offload_socket(so, toep); rc = t4_l2t_send(sc, wr, toep->l2te); Modified: stable/12/sys/dev/cxgbe/tom/t4_cpl_io.c ============================================================================== --- stable/12/sys/dev/cxgbe/tom/t4_cpl_io.c Fri Sep 13 01:07:19 2019 (r352270) +++ stable/12/sys/dev/cxgbe/tom/t4_cpl_io.c Fri Sep 13 01:12:17 2019 (r352271) @@ -99,7 +99,7 @@ aiotx_mbuf_pages(struct mbuf *m) } void -send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) +send_flowc_wr(struct toepcb *toep, struct tcpcb *tp) { struct wrqe *wr; struct fw_flowc_wr *flowc; @@ -113,17 +113,17 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_par KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), ("%s: flowc for tid %u sent already", __func__, toep->tid)); - if (ftxp != NULL) + if (tp != NULL) nparams = 8; else nparams = 6; - if (toep->ulp_mode == ULP_MODE_TLS) + if (ulp_mode(toep) == ULP_MODE_TLS) nparams++; if (toep->tls.fcplenmax != 0) nparams++; - if (toep->tc_idx != -1) { - MPASS(toep->tc_idx >= 0 && - toep->tc_idx < sc->chip_params->nsched_cls); + if (toep->params.tc_idx != -1) { + MPASS(toep->params.tc_idx >= 0 && + toep->params.tc_idx < sc->chip_params->nsched_cls); nparams++; } @@ -155,30 +155,23 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_par FLOWC_PARAM(CH, pi->tx_chan); FLOWC_PARAM(PORT, pi->tx_chan); FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); - if (ftxp) { - uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); - - FLOWC_PARAM(SNDNXT, ftxp->snd_nxt); - FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt); - FLOWC_PARAM(SNDBUF, sndbuf); - FLOWC_PARAM(MSS, ftxp->mss); - - CTR6(KTR_CXGBE, - "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", - __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, - ftxp->rcv_nxt); - } else { - FLOWC_PARAM(SNDBUF, 512); - FLOWC_PARAM(MSS, 512); - - 
CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); + FLOWC_PARAM(SNDBUF, toep->params.sndbuf); + FLOWC_PARAM(MSS, toep->params.emss); + if (tp) { + FLOWC_PARAM(SNDNXT, tp->snd_nxt); + FLOWC_PARAM(RCVNXT, tp->rcv_nxt); } - if (toep->ulp_mode == ULP_MODE_TLS) - FLOWC_PARAM(ULP_MODE, toep->ulp_mode); + CTR6(KTR_CXGBE, + "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", + __func__, toep->tid, toep->params.emss, toep->params.sndbuf, + tp ? tp->snd_nxt : 0, tp ? tp->rcv_nxt : 0); + + if (ulp_mode(toep) == ULP_MODE_TLS) + FLOWC_PARAM(ULP_MODE, ulp_mode(toep)); if (toep->tls.fcplenmax != 0) FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax); - if (toep->tc_idx != -1) - FLOWC_PARAM(SCHEDCLASS, toep->tc_idx); + if (toep->params.tc_idx != -1) + FLOWC_PARAM(SCHEDCLASS, toep->params.tc_idx); #undef FLOWC_PARAM KASSERT(paramidx == nparams, ("nparams mismatch")); @@ -219,7 +212,7 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); } - if (toep->tc_idx != tc_idx) { + if (toep->params.tc_idx != tc_idx) { struct wrqe *wr; struct fw_flowc_wr *flowc; int nparams = 1, flowclen, flowclen16; @@ -258,9 +251,9 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb t4_wrq_tx(sc, wr); } - if (toep->tc_idx >= 0) - t4_release_cl_rl(sc, port_id, toep->tc_idx); - toep->tc_idx = tc_idx; + if (toep->params.tc_idx >= 0) + t4_release_cl_rl(sc, port_id, toep->params.tc_idx); + toep->params.tc_idx = tc_idx; return (0); } @@ -335,30 +328,30 @@ assign_rxopt(struct tcpcb *tp, uint16_t opt) INP_LOCK_ASSERT(inp); - toep->tcp_opt = opt; - toep->mtu_idx = G_TCPOPT_MSS(opt); - tp->t_maxseg = sc->params.mtus[toep->mtu_idx]; + toep->params.mtu_idx = G_TCPOPT_MSS(opt); + tp->t_maxseg = sc->params.mtus[toep->params.mtu_idx]; if (inp->inp_inc.inc_flags & INC_ISIPV6) tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr); else tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr); - toep->emss = tp->t_maxseg; + toep->params.emss = tp->t_maxseg; if (G_TCPOPT_TSTAMP(opt)) { + toep->params.tstamp = 1; + toep->params.emss -= TCPOLEN_TSTAMP_APPA; tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ tp->ts_recent = 0; /* hmmm */ tp->ts_recent_age = tcp_ts_getticks(); - toep->emss -= TCPOLEN_TSTAMP_APPA; - } + } else + toep->params.tstamp = 0; - CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u", - __func__, toep->tid, toep->mtu_idx, - sc->params.mtus[G_TCPOPT_MSS(opt)], tp->t_maxseg, toep->emss); - - if (G_TCPOPT_SACK(opt)) + if (G_TCPOPT_SACK(opt)) { + toep->params.sack = 1; tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ - else + } else { + toep->params.sack = 0; tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ + } if (G_TCPOPT_WSCALE_OK(opt)) tp->t_flags |= TF_RCVD_SCALE; @@ -368,7 +361,13 @@ assign_rxopt(struct tcpcb *tp, uint16_t opt) (TF_RCVD_SCALE | TF_REQ_SCALE)) { tp->rcv_scale = tp->request_r_scale; tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); - } + } else + toep->params.wscale = 0; + + CTR6(KTR_CXGBE, + "assign_rxopt: tid %d, mtu_idx %u, emss %u, ts %u, sack %u, wscale %u", + toep->tid, toep->params.mtu_idx, toep->params.emss, + toep->params.tstamp, toep->params.sack, toep->params.wscale); } /* @@ -383,9 +382,7 @@ make_established(struct toepcb *toep, uint32_t iss, ui struct inpcb *inp = toep->inp; struct socket *so = inp->inp_socket; struct tcpcb *tp = intotcpcb(inp); - long bufsize; uint16_t tcpopt = be16toh(opt); - struct flowc_tx_params ftxp; INP_WLOCK_ASSERT(inp); KASSERT(tp->t_state == TCPS_SYN_SENT || @@ 
-401,7 +398,7 @@ make_established(struct toepcb *toep, uint32_t iss, ui tp->irs = irs; tcp_rcvseqinit(tp); - tp->rcv_wnd = (u_int)toep->opt0_rcv_bufsize << 10; + tp->rcv_wnd = (u_int)toep->params.opt0_bufsize << 10; tp->rcv_adv += tp->rcv_wnd; tp->last_ack_sent = tp->rcv_nxt; @@ -412,20 +409,8 @@ make_established(struct toepcb *toep, uint32_t iss, ui tp->snd_max = iss + 1; assign_rxopt(tp, tcpopt); + send_flowc_wr(toep, tp); - SOCKBUF_LOCK(&so->so_snd); - if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) - bufsize = V_tcp_autosndbuf_max; - else - bufsize = sbspace(&so->so_snd); - SOCKBUF_UNLOCK(&so->so_snd); - - ftxp.snd_nxt = tp->snd_nxt; - ftxp.rcv_nxt = tp->rcv_nxt; - ftxp.snd_space = bufsize; - ftxp.mss = toep->emss; - send_flowc_wr(toep, &ftxp); - soisconnected(so); } @@ -481,7 +466,7 @@ t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) SOCKBUF_LOCK_ASSERT(sb); rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0; - if (toep->ulp_mode == ULP_MODE_TLS) { + if (ulp_mode(toep) == ULP_MODE_TLS) { if (toep->tls.rcv_over >= rx_credits) { toep->tls.rcv_over -= rx_credits; rx_credits = 0; @@ -600,7 +585,7 @@ max_dsgl_nsegs(int tx_credits) static inline void write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, - unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) + unsigned int plen, uint8_t credits, int shove, int ulp_submode) { struct fw_ofld_tx_data_wr *txwr = dst; @@ -608,20 +593,18 @@ write_tx_wr(void *dst, struct toepcb *toep, unsigned i V_FW_WR_IMMDLEN(immdlen)); txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | V_FW_WR_LEN16(credits)); - txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | + txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ulp_mode(toep)) | V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); txwr->plen = htobe32(plen); - if (txalign > 0) { - struct tcpcb *tp = intotcpcb(toep->inp); - - if (plen < 2 * toep->emss) + if (toep->params.tx_align > 0) { + if (plen < 2 * toep->params.emss) txwr->lsodisable_to_flags |= htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); else txwr->lsodisable_to_flags |= htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | - (tp->t_flags & TF_NODELAY ? 0 : + (toep->params.nagle == 0 ? 
0 : F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); } } @@ -717,11 +700,11 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); - KASSERT(toep->ulp_mode == ULP_MODE_NONE || - toep->ulp_mode == ULP_MODE_TCPDDP || - toep->ulp_mode == ULP_MODE_TLS || - toep->ulp_mode == ULP_MODE_RDMA, - ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); + KASSERT(ulp_mode(toep) == ULP_MODE_NONE || + ulp_mode(toep) == ULP_MODE_TCPDDP || + ulp_mode(toep) == ULP_MODE_TLS || + ulp_mode(toep) == ULP_MODE_RDMA, + ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep)); #ifdef VERBOSE_TRACES CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", @@ -861,8 +844,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep } txwr = wrtod(wr); credits = howmany(wr->wr_len, 16); - write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, - sc->tt.tx_align); + write_tx_wr(txwr, toep, plen, plen, credits, shove, 0); m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); nsegs = 0; } else { @@ -880,8 +862,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep } txwr = wrtod(wr); credits = howmany(wr_len, 16); - write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, - sc->tt.tx_align); + write_tx_wr(txwr, toep, 0, plen, credits, shove, 0); write_tx_sgl(txwr + 1, sndptr, m, nsegs, max_nsegs_1mbuf); if (wr_len & 0xf) { @@ -901,7 +882,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep toep->tx_nocompl >= toep->tx_total / 4) compl = 1; - if (compl || toep->ulp_mode == ULP_MODE_RDMA) { + if (compl || ulp_mode(toep) == ULP_MODE_RDMA) { txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); toep->tx_nocompl = 0; toep->plen_nocompl = 0; @@ -975,8 +956,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, INP_WLOCK_ASSERT(inp); KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); - KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, - ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); + KASSERT(ulp_mode(toep) == ULP_MODE_ISCSI, + ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep)); if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) return; @@ -1059,7 +1040,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, txwr = wrtod(wr); credits = howmany(wr->wr_len, 16); write_tx_wr(txwr, toep, plen, adjusted_plen, credits, - shove, ulp_submode, sc->tt.tx_align); + shove, ulp_submode); m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); nsegs = 0; } else { @@ -1077,7 +1058,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, txwr = wrtod(wr); credits = howmany(wr_len, 16); write_tx_wr(txwr, toep, 0, adjusted_plen, credits, - shove, ulp_submode, sc->tt.tx_align); + shove, ulp_submode); write_tx_sgl(txwr + 1, sndptr, m, nsegs, max_nsegs_1mbuf); if (wr_len & 0xf) { @@ -1143,7 +1124,7 @@ t4_tod_output(struct toedev *tod, struct tcpcb *tp) ("%s: inp %p dropped.", __func__, inp)); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); - if (toep->ulp_mode == ULP_MODE_ISCSI) + if (ulp_mode(toep) == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, 0); else if (tls_tx_key(toep)) t4_push_tls_records(sc, toep, 0); @@ -1169,7 +1150,7 @@ t4_send_fin(struct toedev *tod, struct tcpcb *tp) toep->flags |= TPF_SEND_FIN; if (tp->t_state >= TCPS_ESTABLISHED) { - if (toep->ulp_mode == ULP_MODE_ISCSI) + if (ulp_mode(toep) == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, 0); else if (tls_tx_key(toep)) t4_push_tls_records(sc, toep, 0); @@ -1256,7 +1237,7 @@ do_peer_close(struct 
sge_iq *iq, const struct rss_head so = inp->inp_socket; socantrcvmore(so); - if (toep->ulp_mode == ULP_MODE_TCPDDP) { + if (ulp_mode(toep) == ULP_MODE_TCPDDP) { DDP_LOCK(toep); if (__predict_false(toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) @@ -1264,7 +1245,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_head DDP_UNLOCK(toep); } - if (toep->ulp_mode != ULP_MODE_RDMA) { + if (ulp_mode(toep) != ULP_MODE_RDMA) { KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, be32toh(cpl->rcv_nxt))); @@ -1575,14 +1556,14 @@ do_rx_data(struct sge_iq *iq, const struct rss_header tp->rcv_nxt += len; if (tp->rcv_wnd < len) { - KASSERT(toep->ulp_mode == ULP_MODE_RDMA, + KASSERT(ulp_mode(toep) == ULP_MODE_RDMA, ("%s: negative window size", __func__)); } tp->rcv_wnd -= len; tp->t_rcvtime = ticks; - if (toep->ulp_mode == ULP_MODE_TCPDDP) + if (ulp_mode(toep) == ULP_MODE_TCPDDP) DDP_LOCK(toep); so = inp_inpcbtosocket(inp); sb = &so->so_rcv; @@ -1593,7 +1574,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header __func__, tid, len); m_freem(m); SOCKBUF_UNLOCK(sb); - if (toep->ulp_mode == ULP_MODE_TCPDDP) + if (ulp_mode(toep) == ULP_MODE_TCPDDP) DDP_UNLOCK(toep); INP_WUNLOCK(inp); @@ -1624,7 +1605,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header sb->sb_flags &= ~SB_AUTOSIZE; } - if (toep->ulp_mode == ULP_MODE_TCPDDP) { + if (ulp_mode(toep) == ULP_MODE_TCPDDP) { int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) @@ -1667,7 +1648,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header tp->rcv_adv += rx_credits; } - if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && + if (ulp_mode(toep) == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && sbavail(sb) != 0) { CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, tid); @@ -1675,7 +1656,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header } sorwakeup_locked(so); SOCKBUF_UNLOCK_ASSERT(sb); - if (toep->ulp_mode == ULP_MODE_TCPDDP) + if (ulp_mode(toep) == ULP_MODE_TCPDDP) DDP_UNLOCK(toep); INP_WUNLOCK(inp); @@ -1785,7 +1766,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header #endif toep->flags &= ~TPF_TX_SUSPENDED; CURVNET_SET(toep->vnet); - if (toep->ulp_mode == ULP_MODE_ISCSI) + if (ulp_mode(toep) == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, plen); else if (tls_tx_key(toep)) t4_push_tls_records(sc, toep, plen); @@ -1798,7 +1779,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header SOCKBUF_LOCK(sb); sbu = sbused(sb); - if (toep->ulp_mode == ULP_MODE_ISCSI) { + if (ulp_mode(toep) == ULP_MODE_ISCSI) { if (__predict_false(sbu > 0)) { /* @@ -2013,7 +1994,6 @@ hold_aio(struct kaiocb *job) static void t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job) { - struct adapter *sc; struct sockbuf *sb; struct file *fp; struct aiotx_buffer *ab; @@ -2023,7 +2003,6 @@ t4_aiotx_process_job(struct toepcb *toep, struct socke int error; bool moretocome, sendmore; - sc = td_adapter(toep->td); sb = &so->so_snd; SOCKBUF_UNLOCK(sb); fp = job->fd_file; @@ -2107,8 +2086,8 @@ sendanother: moretocome = false; } else moretocome = true; - if (m->m_len > sc->tt.sndbuf) { - m->m_len = sc->tt.sndbuf; + if (m->m_len > toep->params.sndbuf) { + m->m_len = toep->params.sndbuf; sendmore = true; } else sendmore = false; Modified: stable/12/sys/dev/cxgbe/tom/t4_ddp.c ============================================================================== --- stable/12/sys/dev/cxgbe/tom/t4_ddp.c Fri 
Sep 13 01:07:19 2019 (r352270) +++ stable/12/sys/dev/cxgbe/tom/t4_ddp.c Fri Sep 13 01:12:17 2019 (r352271) @@ -769,7 +769,7 @@ do_rx_data_ddp(struct sge_iq *iq, const struct rss_hea __func__, vld, tid, toep); } - if (toep->ulp_mode == ULP_MODE_ISCSI) { + if (ulp_mode(toep) == ULP_MODE_ISCSI) { t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); return (0); } Modified: stable/12/sys/dev/cxgbe/tom/t4_listen.c ============================================================================== --- stable/12/sys/dev/cxgbe/tom/t4_listen.c Fri Sep 13 01:07:19 2019 (r352270) +++ stable/12/sys/dev/cxgbe/tom/t4_listen.c Fri Sep 13 01:12:17 2019 (r352271) @@ -348,7 +348,7 @@ send_reset_synqe(struct toedev *tod, struct synq_entry struct ifnet *ifp = m->m_pkthdr.rcvif; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; - struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; + struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; struct wrqe *wr; struct fw_flowc_wr *flowc; struct cpl_abort_req *req; @@ -368,8 +368,8 @@ send_reset_synqe(struct toedev *tod, struct synq_entry return; /* abort already in progress */ synqe->flags |= TPF_ABORT_SHUTDOWN; - ofld_txq = &sc->sge.ofld_txq[synqe->txqid]; - ofld_rxq = &sc->sge.ofld_rxq[synqe->rxqid]; + ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx]; + ofld_rxq = &sc->sge.ofld_rxq[synqe->params.rxq_idx]; /* The wrqe will have two WRs - a flowc followed by an abort_req */ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); @@ -836,7 +836,7 @@ done_with_synqe(struct adapter *sc, struct synq_entry { struct listen_ctx *lctx = synqe->lctx; struct inpcb *inp = lctx->inp; - struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; + struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; int ntids; INP_WLOCK_ASSERT(inp); @@ -887,7 +887,7 @@ do_abort_req_synqe(struct sge_iq *iq, const struct rss INP_WLOCK(inp); - ofld_txq = &sc->sge.ofld_txq[synqe->txqid]; + ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx]; /* * If we'd initiated an abort earlier the reply to it is responsible for @@ -962,28 +962,6 @@ t4_offload_socket(struct toedev *tod, void *arg, struc synqe->flags |= TPF_SYNQE_EXPANDED; } -static inline void -save_qids_in_synqe(struct synq_entry *synqe, struct vi_info *vi, - struct offload_settings *s) -{ - uint32_t txqid, rxqid; - - if (s->txq >= 0 && s->txq < vi->nofldtxq) - txqid = s->txq; - else - txqid = arc4random() % vi->nofldtxq; - txqid += vi->first_ofld_txq; - - if (s->rxq >= 0 && s->rxq < vi->nofldrxq) - rxqid = s->rxq; - else - rxqid = arc4random() % vi->nofldrxq; - rxqid += vi->first_ofld_rxq; - - synqe->txqid = txqid; - synqe->rxqid = rxqid; -} - static void t4opt_to_tcpopt(const struct tcp_options *t4opt, struct tcpopt *to) { @@ -1006,95 +984,6 @@ t4opt_to_tcpopt(const struct tcp_options *t4opt, struc to->to_flags |= TOF_SACKPERM; } -/* - * Options2 for passive open. - */ -static uint32_t -calc_opt2p(struct adapter *sc, struct port_info *pi, int rxqid, - const struct tcp_options *tcpopt, struct tcphdr *th, int ulp_mode, - struct cc_algo *cc, const struct offload_settings *s) -{ - struct sge_ofld_rxq *ofld_rxq = &sc->sge.ofld_rxq[rxqid]; - uint32_t opt2 = 0; - - /* - * rx flow control, rx coalesce, congestion control, and tx pace are all - * explicitly set by the driver. On T5+ the ISS is also set by the - * driver to the value picked by the kernel. 
- */ - if (is_t4(sc)) { - opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID; - opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID; - } else { - opt2 |= F_T5_OPT_2_VALID; /* all 4 valid */ - opt2 |= F_T5_ISS; /* ISS provided in CPL */ - } - - if (tcpopt->sack && (s->sack > 0 || (s->sack < 0 && V_tcp_do_rfc1323))) - opt2 |= F_SACK_EN; - - if (tcpopt->tstamp && - (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323))) - opt2 |= F_TSTAMPS_EN; - - if (tcpopt->wsf < 15 && V_tcp_do_rfc1323) - opt2 |= F_WND_SCALE_EN; - - if (th->th_flags & (TH_ECE | TH_CWR) && - (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn))) - opt2 |= F_CCTRL_ECN; - - /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */ - - opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]); - - /* These defaults are subject to ULP specific fixups later. */ - opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0); - - opt2 |= V_PACE(0); - - if (s->cong_algo >= 0) - opt2 |= V_CONG_CNTRL(s->cong_algo); - else if (sc->tt.cong_algorithm >= 0) - opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL); - else { - if (strcasecmp(cc->name, "reno") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_RENO); - else if (strcasecmp(cc->name, "tahoe") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); - if (strcasecmp(cc->name, "newreno") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); - if (strcasecmp(cc->name, "highspeed") == 0) - opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED); - else { - /* - * Use newreno in case the algorithm selected by the - * host stack is not supported by the hardware. - */ - opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); - } - } - - if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce)) - opt2 |= V_RX_COALESCE(M_RX_COALESCE); - - /* Note that ofld_rxq is already set according to s->rxq. */ - opt2 |= F_RSS_QUEUE_VALID; - opt2 |= V_RSS_QUEUE(ofld_rxq->iq.abs_id); - -#ifdef USE_DDP_RX_FLOW_CONTROL - if (ulp_mode == ULP_MODE_TCPDDP) - opt2 |= F_RX_FC_DDP; -#endif - - if (ulp_mode == ULP_MODE_TLS) { - opt2 &= ~V_RX_COALESCE(M_RX_COALESCE); - opt2 |= F_RX_FC_DISABLE; - } - - return (htobe32(opt2)); -} - static void pass_accept_req_to_protohdrs(struct adapter *sc, const struct mbuf *m, struct in_conninfo *inc, struct tcphdr *th) @@ -1189,7 +1078,7 @@ send_synack(struct adapter *sc, struct synq_entry *syn { struct wrqe *wr; struct cpl_pass_accept_rpl *rpl; - struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; + struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; wr = alloc_wrqe(is_t4(sc) ? sizeof(struct cpl_pass_accept_rpl) : sizeof(struct cpl_t5_pass_accept_rpl), &sc->sge.ctrlq[0]); @@ -1385,6 +1274,9 @@ found: } atomic_store_int(&synqe->ok_to_respond, 0); + init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx, + &synqe->params); + /* * If all goes well t4_syncache_respond will get called during * syncache_add. Note that syncache_add releases the pcb lock. @@ -1395,27 +1287,12 @@ found: if (atomic_load_int(&synqe->ok_to_respond) > 0) { uint64_t opt0; uint32_t opt2; - u_int wnd; - int rscale, mtu_idx, rx_credits; - mtu_idx = find_best_mtu_idx(sc, &inc, &settings); - rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ? 
select_rcv_wscale() : 0; - wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND); - wnd = min(wnd, MAX_RCV_WND); - rx_credits = min(wnd >> 10, M_RCV_BUFSIZ); + opt0 = calc_options0(vi, &synqe->params); + opt2 = calc_options2(vi, &synqe->params); - save_qids_in_synqe(synqe, vi, &settings); - synqe->ulp_mode = select_ulp_mode(so, sc, &settings); - - opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits, - synqe->ulp_mode, &settings); - opt2 = calc_opt2p(sc, pi, synqe->rxqid, &cpl->tcpopt, &th, - synqe->ulp_mode, CC_ALGO(intotcpcb(inp)), &settings); - insert_tid(sc, tid, synqe, ntids); synqe->tid = tid; - synqe->l2e_idx = e->idx; - synqe->rcv_bufsize = rx_credits; synqe->syn = m; m = NULL; @@ -1427,8 +1304,8 @@ found: } CTR6(KTR_CXGBE, - "%s: stid %u, tid %u, lctx %p, synqe %p, mode %d, SYNACK", - __func__, stid, tid, lctx, synqe, synqe->ulp_mode); + "%s: stid %u, tid %u, synqe %p, opt0 %#016lx, opt2 %#08x", + __func__, stid, tid, synqe, be64toh(opt0), be32toh(opt2)); } else REJECT_PASS_ACCEPT_REQ(false); @@ -1540,18 +1417,19 @@ reset: return (0); } - KASSERT(synqe->rxqid == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0], + KASSERT(synqe->params.rxq_idx == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0], ("%s: CPL arrived on unexpected rxq. %d %d", __func__, - synqe->rxqid, (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0]))); + synqe->params.rxq_idx, + (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0]))); - toep = alloc_toepcb(vi, synqe->txqid, synqe->rxqid, M_NOWAIT); + toep = alloc_toepcb(vi, M_NOWAIT); if (toep == NULL) goto reset; toep->tid = tid; - toep->l2te = &sc->l2t->l2tab[synqe->l2e_idx]; + toep->l2te = &sc->l2t->l2tab[synqe->params.l2t_idx]; toep->vnet = lctx->vnet; - set_ulp_mode(toep, synqe->ulp_mode); - toep->opt0_rcv_bufsize = synqe->rcv_bufsize; + bcopy(&synqe->params, &toep->params, sizeof(toep->params)); + init_toepcb(vi, toep); MPASS(be32toh(cpl->snd_isn) - 1 == synqe->iss); MPASS(be32toh(cpl->rcv_isn) - 1 == synqe->irs); Modified: stable/12/sys/dev/cxgbe/tom/t4_tls.c ============================================================================== --- stable/12/sys/dev/cxgbe/tom/t4_tls.c Fri Sep 13 01:07:19 2019 (r352270) +++ stable/12/sys/dev/cxgbe/tom/t4_tls.c Fri Sep 13 01:12:17 2019 (r352271) @@ -589,7 +589,7 @@ program_key_context(struct tcpcb *tp, struct toepcb *t "KEY_WRITE_TX", uk_ctx->proto_ver); if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX && - toep->ulp_mode != ULP_MODE_TLS) + ulp_mode(toep) != ULP_MODE_TLS) return (EOPNOTSUPP); /* Don't copy the 'tx' and 'rx' fields. */ @@ -787,7 +787,7 @@ t4_ctloutput_tls(struct socket *so, struct sockopt *so INP_WUNLOCK(inp); break; case TCP_TLSOM_CLR_TLS_TOM: - if (toep->ulp_mode == ULP_MODE_TLS) { + if (ulp_mode(toep) == ULP_MODE_TLS) { CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM", __func__, toep->tid); tls_clr_ofld_mode(toep); @@ -796,7 +796,7 @@ t4_ctloutput_tls(struct socket *so, struct sockopt *so INP_WUNLOCK(inp); break; case TCP_TLSOM_CLR_QUIES: - if (toep->ulp_mode == ULP_MODE_TLS) { + if (ulp_mode(toep) == ULP_MODE_TLS) { *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***