Date: Tue, 15 May 2018 04:24:38 +0000 (UTC)
From: Navdeep Parhar <np@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r333620 - in head/sys/dev/cxgbe: . common firmware tom
Message-ID: <201805150424.w4F4OcBV088548@repo.freebsd.org>
Author: np
Date: Tue May 15 04:24:38 2018
New Revision: 333620
URL: https://svnweb.freebsd.org/changeset/base/333620

Log:
  cxgbe(4): Filtering related features and fixes.

  - Driver support for hardware NAT.
  - Driver support for swapmac action.
  - Validate a request to create a hashfilter against the filter mask.
  - Add a hashfilter config file for T5.

  Sponsored by:	Chelsio Communications

Added:
  head/sys/dev/cxgbe/firmware/t5fw_cfg_hashfilter.txt   (contents, props changed)
Modified:
  head/sys/dev/cxgbe/common/common.h
  head/sys/dev/cxgbe/common/t4_hw.c
  head/sys/dev/cxgbe/common/t4_regs_values.h
  head/sys/dev/cxgbe/t4_filter.c
  head/sys/dev/cxgbe/tom/t4_tom.c

Modified: head/sys/dev/cxgbe/common/common.h
==============================================================================
--- head/sys/dev/cxgbe/common/common.h	Tue May 15 02:26:50 2018	(r333619)
+++ head/sys/dev/cxgbe/common/common.h	Tue May 15 04:24:38 2018	(r333620)
@@ -238,6 +238,7 @@ struct tp_params {
 
 	uint32_t vlan_pri_map;
 	uint32_t ingress_config;
+	uint64_t hash_filter_mask;
 	__be16 err_vec_mask;
 
 	int8_t fcoe_shift;

Modified: head/sys/dev/cxgbe/common/t4_hw.c
==============================================================================
--- head/sys/dev/cxgbe/common/t4_hw.c	Tue May 15 02:26:50 2018	(r333619)
+++ head/sys/dev/cxgbe/common/t4_hw.c	Tue May 15 04:24:38 2018	(r333620)
@@ -8368,6 +8368,7 @@ int t4_init_sge_params(struct adapter *adapter)
 static void read_filter_mode_and_ingress_config(struct adapter *adap,
     bool sleep_ok)
 {
+	uint32_t v;
 	struct tp_params *tpp = &adap->params.tp;
 
 	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
@@ -8391,12 +8392,12 @@ static void read_filter_mode_and_ingress_config(struct
 	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
 	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
 
-	/*
-	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
-	 * represents the presence of an Outer VLAN instead of a VNIC ID.
-	 */
-	if ((tpp->ingress_config & F_VNIC) == 0)
-		tpp->vnic_shift = -1;
+	if (chip_id(adap) > CHELSIO_T4) {
+		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
+		adap->params.tp.hash_filter_mask = v;
+		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
+		adap->params.tp.hash_filter_mask |= (u64)v << 32;
+	}
 }
 
 /**

Modified: head/sys/dev/cxgbe/common/t4_regs_values.h
==============================================================================
--- head/sys/dev/cxgbe/common/t4_regs_values.h	Tue May 15 02:26:50 2018	(r333619)
+++ head/sys/dev/cxgbe/common/t4_regs_values.h	Tue May 15 04:24:38 2018	(r333620)
@@ -292,6 +292,17 @@
 #define W_FT_MPSHITTYPE		3
 #define W_FT_FRAGMENTATION	1
 
+#define M_FT_FCOE		((1ULL << W_FT_FCOE) - 1)
+#define M_FT_PORT		((1ULL << W_FT_PORT) - 1)
+#define M_FT_VNIC_ID		((1ULL << W_FT_VNIC_ID) - 1)
+#define M_FT_VLAN		((1ULL << W_FT_VLAN) - 1)
+#define M_FT_TOS		((1ULL << W_FT_TOS) - 1)
+#define M_FT_PROTOCOL		((1ULL << W_FT_PROTOCOL) - 1)
+#define M_FT_ETHERTYPE		((1ULL << W_FT_ETHERTYPE) - 1)
+#define M_FT_MACMATCH		((1ULL << W_FT_MACMATCH) - 1)
+#define M_FT_MPSHITTYPE		((1ULL << W_FT_MPSHITTYPE) - 1)
+#define M_FT_FRAGMENTATION	((1ULL << W_FT_FRAGMENTATION) - 1)
+
 /*
  * Some of the Compressed Filter Tuple fields have internal structure.  These
  * bit shifts/masks describe those structures.
All shifts are relative to the Added: head/sys/dev/cxgbe/firmware/t5fw_cfg_hashfilter.txt ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ head/sys/dev/cxgbe/firmware/t5fw_cfg_hashfilter.txt Tue May 15 04:24:38 2018 (r333620) @@ -0,0 +1,300 @@ +# Firmware configuration file. +# +# Global limits (some are hardware limits, others are due to the firmware). +# nvi = 128 virtual interfaces +# niqflint = 1023 ingress queues with freelists and/or interrupts +# nethctrl = 64K Ethernet or ctrl egress queues +# neq = 64K egress queues of all kinds, including freelists +# nexactf = 512 MPS TCAM entries, can oversubscribe. +# + +[global] + rss_glb_config_mode = basicvirtual + rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp + + # PL_TIMEOUT register + pl_timeout_value = 10000 # the timeout value in units of us + + # SGE_THROTTLE_CONTROL + bar2throttlecount = 500 # bar2throttlecount in us + + sge_timer_value = 1, 5, 10, 50, 100, 200 # SGE_TIMER_VALUE* in usecs + + reg[0x1124] = 0x00000400/0x00000400 # SGE_CONTROL2, enable VFIFO; if + # SGE_VFIFO_SIZE is not set, then + # firmware will set it up in function + # of number of egress queues used + + reg[0x1130] = 0x00d5ffeb # SGE_DBP_FETCH_THRESHOLD, fetch + # threshold set to queue depth + # minus 128-entries for FL and HP + # queues, and 0xfff for LP which + # prompts the firmware to set it up + # in function of egress queues + # used + + reg[0x113c] = 0x0002ffc0 # SGE_VFIFO_SIZE, set to 0x2ffc0 which + # prompts the firmware to set it up in + # function of number of egress queues + # used + + # enable TP_OUT_CONFIG.IPIDSPLITMODE + reg[0x7d04] = 0x00010000/0x00010000 + + # disable TP_PARA_REG3.RxFragEn + reg[0x7d6c] = 0x00000000/0x00007000 + + # enable TP_PARA_REG6.EnableCSnd + reg[0x7d78] = 0x00000400/0x00000000 + + reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT + + filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe + filterMask = port, protocol + + tp_pmrx = 20, 512 + tp_pmrx_pagesize = 16K + + # TP number of RX channels (0 = auto) + tp_nrxch = 0 + + tp_pmtx = 40, 512 + tp_pmtx_pagesize = 64K + + # TP number of TX channels (0 = auto) + tp_ntxch = 0 + + # TP OFLD MTUs + tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600 + + # TP_GLOBAL_CONFIG + reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable + + # TP_PC_CONFIG + reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError + + # TP_PC_CONFIG2 + reg[0x7d4c] = 0x00010000/0x00010000 # set DisableNewPshFlag + + # TP_PARA_REG0 + reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6 + + # TP_PARA_REG3 + reg[0x7d6c] = 0x28000000/0x28000000 # set EnableTnlCngHdr + # set RxMacCheck (Note: + # Only for hash filter, + # no tcp offload) + + # TP_PIO_ADDR:TP_RX_LPBK + reg[tp_pio:0x28] = 0x00208208/0x00ffffff # set commit limits to 8 + + # MC configuration + mc_mode_brc[0] = 0 # mc0 - 1: enable BRC, 0: enable RBC + mc_mode_brc[1] = 0 # mc1 - 1: enable BRC, 0: enable RBC + + # ULP_TX_CONFIG + reg[0x8dc0] = 0x00000004/0x00000004 # Enable more error msg for ... + # TPT error. + +# PFs 0-3. These get 8 MSI/8 MSI-X vectors each. VFs are supported by +# these 4 PFs only. 
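The [global] section above picks the compressed filter tuple layout with filterMode and, for hashfilters, the subset of fields the lookup hashes on with filterMask.  Two constraints apply: the widths of the selected fields must fit the chip's 36-bit compressed tuple, and filterMask may only name fields that are already in filterMode.  The standalone sketch below (not part of the commit) checks both for this configuration; the field widths mirror the W_FT_* constants in t4_regs_values.h, while the program itself and its names are invented for illustration.

#include <stdio.h>
#include <string.h>

#define NITEMS(a)	((int)(sizeof(a) / sizeof((a)[0])))

struct ft_field {
	const char *name;
	int width;			/* W_FT_* from t4_regs_values.h */
};

/* filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe */
static const struct ft_field fmode[] = {
	{ "fragmentation", 1 }, { "mpshittype", 3 }, { "protocol", 8 },
	{ "vlan", 17 }, { "port", 3 }, { "fcoe", 1 },
};

/* filterMask = port, protocol */
static const char *fmask[] = { "port", "protocol" };

int
main(void)
{
	int i, j, total = 0;

	for (i = 0; i < NITEMS(fmode); i++)
		total += fmode[i].width;
	printf("filterMode uses %d of 36 tuple bits\n", total);

	for (i = 0; i < NITEMS(fmask); i++) {
		for (j = 0; j < NITEMS(fmode); j++)
			if (strcmp(fmask[i], fmode[j].name) == 0)
				break;
		if (j == NITEMS(fmode))
			printf("filterMask field '%s' is not in filterMode\n",
			    fmask[i]);
	}
	return (0);
}

With the widths above this mode uses 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, so it fits, and both filterMask fields are present in filterMode.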
+[function "0"] + nvf = 4 + wx_caps = all + r_caps = all + nvi = 2 + rssnvi = 2 + niqflint = 4 + nethctrl = 4 + neq = 8 + nexactf = 4 + cmask = all + pmask = 0x1 + +[function "1"] + nvf = 4 + wx_caps = all + r_caps = all + nvi = 2 + rssnvi = 2 + niqflint = 4 + nethctrl = 4 + neq = 8 + nexactf = 4 + cmask = all + pmask = 0x2 + +[function "2"] + nvf = 4 + wx_caps = all + r_caps = all + nvi = 2 + rssnvi = 2 + niqflint = 4 + nethctrl = 4 + neq = 8 + nexactf = 4 + cmask = all + pmask = 0x4 + +[function "3"] + nvf = 4 + wx_caps = all + r_caps = all + nvi = 2 + rssnvi = 2 + niqflint = 4 + nethctrl = 4 + neq = 8 + nexactf = 4 + cmask = all + pmask = 0x8 + +# PF4 is the resource-rich PF that the bus/nexus driver attaches to. +# It gets 32 MSI/128 MSI-X vectors. +[function "4"] + wx_caps = all + r_caps = all + nvi = 32 + rssnvi = 8 + niqflint = 512 + nethctrl = 1024 + neq = 2048 + nqpcq = 8192 + nexactf = 456 + cmask = all + pmask = all + + # driver will mask off features it won't use + protocol = nic_hashfilter + + tp_l2t = 4096 + + # TCAM has 8K cells; each region must start at a multiple of 128 cell. + # Each entry in these categories takes 4 cells each. nhash will use the + # TCAM iff there is room left (that is, the rest don't add up to 2048). + nroute = 32 + nclip = 32 + nfilter = 1008 + nserver = 512 + nhash = 524288 + +# PF5 is the SCSI Controller PF. It gets 32 MSI/40 MSI-X vectors. +# Not used right now. +[function "5"] + nvi = 1 + rssnvi = 0 + +# PF6 is the FCoE Controller PF. It gets 32 MSI/40 MSI-X vectors. +# Not used right now. +[function "6"] + nvi = 1 + rssnvi = 0 + +# The following function, 1023, is not an actual PCIE function but is used to +# configure and reserve firmware internal resources that come from the global +# resource pool. +[function "1023"] + wx_caps = all + r_caps = all + nvi = 4 + rssnvi = 0 + cmask = all + pmask = all + nexactf = 8 + nfilter = 16 + +# For Virtual functions, we only allow NIC functionality and we only allow +# access to one port (1 << PF). Note that because of limitations in the +# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL +# and GTS registers, the number of Ingress and Egress Queues must be a power +# of 2. +# +[function "0/*"] + wx_caps = 0x82 + r_caps = 0x86 + nvi = 1 + rssnvi = 1 + niqflint = 2 + nethctrl = 2 + neq = 4 + nexactf = 2 + cmask = all + pmask = 0x1 + +[function "1/*"] + wx_caps = 0x82 + r_caps = 0x86 + nvi = 1 + rssnvi = 1 + niqflint = 2 + nethctrl = 2 + neq = 4 + nexactf = 2 + cmask = all + pmask = 0x2 + +[function "2/*"] + wx_caps = 0x82 + r_caps = 0x86 + nvi = 1 + rssnvi = 1 + niqflint = 2 + nethctrl = 2 + neq = 4 + nexactf = 2 + cmask = all + pmask = 0x4 + +[function "3/*"] + wx_caps = 0x82 + r_caps = 0x86 + nvi = 1 + rssnvi = 1 + niqflint = 2 + nethctrl = 2 + neq = 4 + nexactf = 2 + cmask = all + pmask = 0x8 + +# MPS has 192K buffer space for ingress packets from the wire as well as +# loopback path of the L2 switch. 
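The [function "4"] section above carves the lookup-engine TCAM into regions for routes, CLIP entries, TCAM filters, and servers, and its comment gives the budget: 8192 cells, 4 cells per entry, each region starting on a 128-cell boundary.  The rough check below is illustrative only; rounding every region up to the 128-cell alignment is an assumption of this sketch, and the helper names are invented.

#include <stdio.h>

#define TCAM_CELLS	8192
#define CELLS_PER_ENTRY	4
#define REGION_ALIGN	128	/* cells */

/* Cells consumed by a region, rounded up to the alignment boundary. */
static int
region_cells(int nentries)
{
	int cells = nentries * CELLS_PER_ENTRY;

	return ((cells + REGION_ALIGN - 1) / REGION_ALIGN * REGION_ALIGN);
}

int
main(void)
{
	int used;

	used = region_cells(32) +	/* nroute */
	    region_cells(32) +		/* nclip */
	    region_cells(1008) +	/* nfilter */
	    region_cells(512);		/* nserver */
	printf("%d of %d TCAM cells used, %d left over\n",
	    used, TCAM_CELLS, TCAM_CELLS - used);
	return (0);
}

The four regions come to 32 + 32 + 1008 + 512 = 1584 entries, short of the 2048 that would exhaust the TCAM, which is the "room left" condition the comment gives for nhash being allowed to use it.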
+[port "0"] + dcb = none + bg_mem = 25 + lpbk_mem = 25 + hwm = 30 + lwm = 15 + dwm = 30 + +[port "1"] + dcb = none + bg_mem = 25 + lpbk_mem = 25 + hwm = 30 + lwm = 15 + dwm = 30 + +[port "2"] + dcb = none + bg_mem = 25 + lpbk_mem = 25 + hwm = 30 + lwm = 15 + dwm = 30 + +[port "3"] + dcb = none + bg_mem = 25 + lpbk_mem = 25 + hwm = 30 + lwm = 15 + dwm = 30 + +[fini] + version = 0x1 + checksum = 0x380a0a4 +# +# $FreeBSD$ +# Modified: head/sys/dev/cxgbe/t4_filter.c ============================================================================== --- head/sys/dev/cxgbe/t4_filter.c Tue May 15 02:26:50 2018 (r333619) +++ head/sys/dev/cxgbe/t4_filter.c Tue May 15 04:24:38 2018 (r333620) @@ -64,7 +64,7 @@ struct filter_entry { static void free_filter_resources(struct filter_entry *); static int get_hashfilter(struct adapter *, struct t4_filter *); -static int set_hashfilter(struct adapter *, struct t4_filter *, +static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t, struct l2t_entry *); static int del_hashfilter(struct adapter *, struct t4_filter *); static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *); @@ -96,50 +96,6 @@ remove_hftid(struct adapter *sc, int tid, int ntids) } static uint32_t -fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) -{ - uint32_t mode; - - mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | - T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; - - if (fconf & F_FRAGMENTATION) - mode |= T4_FILTER_IP_FRAGMENT; - - if (fconf & F_MPSHITTYPE) - mode |= T4_FILTER_MPS_HIT_TYPE; - - if (fconf & F_MACMATCH) - mode |= T4_FILTER_MAC_IDX; - - if (fconf & F_ETHERTYPE) - mode |= T4_FILTER_ETH_TYPE; - - if (fconf & F_PROTOCOL) - mode |= T4_FILTER_IP_PROTO; - - if (fconf & F_TOS) - mode |= T4_FILTER_IP_TOS; - - if (fconf & F_VLAN) - mode |= T4_FILTER_VLAN; - - if (fconf & F_VNIC_ID) { - mode |= T4_FILTER_VNIC; - if (iconf & F_VNIC) - mode |= T4_FILTER_IC_VNIC; - } - - if (fconf & F_PORT) - mode |= T4_FILTER_PORT; - - if (fconf & F_FCOE) - mode |= T4_FILTER_FCoE; - - return (mode); -} - -static uint32_t mode_to_fconf(uint32_t mode) { uint32_t fconf = 0; @@ -186,7 +142,8 @@ mode_to_iconf(uint32_t mode) return (0); } -static int check_fspec_against_fconf_iconf(struct adapter *sc, +static int +check_fspec_against_fconf_iconf(struct adapter *sc, struct t4_filter_specification *fs) { struct tp_params *tpp = &sc->params.tp; @@ -240,15 +197,38 @@ static int check_fspec_against_fconf_iconf(struct adap int get_filter_mode(struct adapter *sc, uint32_t *mode) { - struct tp_params *tpp = &sc->params.tp; + struct tp_params *tp = &sc->params.tp; + uint64_t mask; - /* - * We trust the cached values of the relevant TP registers. This means - * things work reliably only if writes to those registers are always via - * t4_set_filter_mode. - */ - *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); + /* Non-zero incoming value in mode means "hashfilter mode". */ + mask = *mode ? 
tp->hash_filter_mask : UINT64_MAX; + /* Always */ + *mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | + T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; + +#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit) do { \ + if (tp->vlan_pri_map & (fconf_bit)) { \ + MPASS(tp->field_shift >= 0); \ + if ((mask >> tp->field_shift & field_mask) == field_mask) \ + *mode |= (mode_bit); \ + } \ +} while (0) + + CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT); + CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE); + CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX); + CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE); + CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO); + CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS); + CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN); + CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID , T4_FILTER_VNIC); + if (tp->ingress_config & F_VNIC) + *mode |= T4_FILTER_IC_VNIC; + CHECK_FIELD(F_PORT, port_shift, M_FT_PORT , T4_FILTER_PORT); + CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE , T4_FILTER_FCoE); +#undef CHECK_FIELD + return (0); } @@ -361,7 +341,7 @@ static int set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te) { struct filter_entry *f; - struct fw_filter_wr *fwr; + struct fw_filter2_wr *fwr; u_int vnic_vld, vnic_vld_mask; struct wrq_cookie cookie; int i, rc, busy, locked; @@ -385,8 +365,13 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t else if (busy > 0) rc = EBUSY; else { - fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), - &cookie); + int len16; + + if (sc->params.filter2_wr_support) + len16 = howmany(sizeof(struct fw_filter2_wr), 16); + else + len16 = howmany(sizeof(struct fw_filter_wr), 16); + fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie); if (__predict_false(fwr == NULL)) rc = ENOMEM; else { @@ -419,7 +404,10 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t vnic_vld_mask = 0; bzero(fwr, sizeof(*fwr)); - fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); + if (sc->params.filter2_wr_support) + fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR)); + else + fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); fwr->tid_to_iq = htobe32(V_FW_FILTER_WR_TID(f->tid) | @@ -484,6 +472,20 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t /* XXX: need to use SMT idx instead */ bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); } + if (sc->params.filter2_wr_support) { + fwr->filter_type_swapmac = + V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac); + fwr->natmode_to_ulp_type = + V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ? + ULP_MODE_TCPDDP : ULP_MODE_NONE) | + V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) | + V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode); + memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip)); + memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip)); + fwr->newlport = htobe16(f->fs.nat_dport); + fwr->newfport = htobe16(f->fs.nat_sport); + fwr->natseqcheck = htobe32(f->fs.nat_seq_chk); + } commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); /* Wait for response. 
*/ @@ -502,11 +504,88 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t return (rc); } +static int +hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs, + uint64_t *ftuple) +{ + struct tp_params *tp = &sc->params.tp; + uint64_t fmask; + + *ftuple = fmask = 0; + + /* + * Initialize each of the fields which we care about which are present + * in the Compressed Filter Tuple. + */ + if (tp->vlan_shift >= 0 && fs->mask.vlan) { + *ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift; + fmask |= M_FT_VLAN << tp->vlan_shift; + } + + if (tp->port_shift >= 0 && fs->mask.iport) { + *ftuple |= (uint64_t)fs->val.iport << tp->port_shift; + fmask |= M_FT_PORT << tp->port_shift; + } + + if (tp->protocol_shift >= 0 && fs->mask.proto) { + *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift; + fmask |= M_FT_PROTOCOL << tp->protocol_shift; + } + + if (tp->tos_shift >= 0 && fs->mask.tos) { + *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift; + fmask |= M_FT_TOS << tp->tos_shift; + } + + if (tp->vnic_shift >= 0 && fs->mask.vnic) { + /* F_VNIC in ingress config was already validated. */ + if (tp->ingress_config & F_VNIC) + MPASS(fs->mask.pfvf_vld); + else + MPASS(fs->mask.ovlan_vld); + + *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift; + fmask |= M_FT_VNIC_ID << tp->vnic_shift; + } + + if (tp->macmatch_shift >= 0 && fs->mask.macidx) { + *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift; + fmask |= M_FT_MACMATCH << tp->macmatch_shift; + } + + if (tp->ethertype_shift >= 0 && fs->mask.ethtype) { + *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift; + fmask |= M_FT_ETHERTYPE << tp->ethertype_shift; + } + + if (tp->matchtype_shift >= 0 && fs->mask.matchtype) { + *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift; + fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift; + } + + if (tp->frag_shift >= 0 && fs->mask.frag) { + *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift; + fmask |= M_FT_FRAGMENTATION << tp->frag_shift; + } + + if (tp->fcoe_shift >= 0 && fs->mask.fcoe) { + *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift; + fmask |= M_FT_FCOE << tp->fcoe_shift; + } + + /* A hashfilter must conform to the filterMask. */ + if (fmask != tp->hash_filter_mask) + return (EINVAL); + + return (0); +} + int set_filter(struct adapter *sc, struct t4_filter *t) { struct tid_info *ti = &sc->tids; struct l2t_entry *l2te; + uint64_t ftuple; int rc; /* @@ -516,8 +595,15 @@ set_filter(struct adapter *sc, struct t4_filter *t) if (t->fs.hash) { if (!is_hashfilter(sc) || ti->ntids == 0) return (ENOTSUP); + /* Hardware, not user, selects a tid for hashfilters. */ if (t->idx != (uint32_t)-1) - return (EINVAL); /* hw, not user picks the idx */ + return (EINVAL); + /* T5 can't count hashfilter hits. */ + if (is_t5(sc) && t->fs.hitcnts) + return (EINVAL); + rc = hashfilter_ntuple(sc, &t->fs, &ftuple); + if (rc != 0) + return (rc); } else { if (ti->nftids == 0) return (ENOTSUP); @@ -529,9 +615,10 @@ set_filter(struct adapter *sc, struct t4_filter *t) return (EINVAL); } - /* T4 doesn't support removing VLAN Tags for loop back filters. */ + /* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. 
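The hashfilter_ntuple() added in the hunk above is where the new mask validation happens: as it packs each requested match field into the 64-bit compressed tuple it also accumulates the mask those fields cover, and if that accumulated mask is not exactly the hash filter mask the hardware was configured with (tp->hash_filter_mask, read in the t4_hw.c change), the request is rejected with EINVAL.  The cut-down sketch below shows the same pattern for just the port and protocol fields, matching the sample config's "filterMask = port, protocol"; the function, its parameters, and the field widths used here are illustrative, not driver code.

#include <errno.h>
#include <stdint.h>

#define M_FT_PORT	((1ULL << 3) - 1)	/* 3-bit field */
#define M_FT_PROTOCOL	((1ULL << 8) - 1)	/* 8-bit field */

static int
build_ntuple(uint64_t hash_filter_mask, int port_shift, int protocol_shift,
    uint8_t iport, uint8_t proto, uint64_t *ftuple)
{
	uint64_t fmask = 0;

	*ftuple = 0;
	if (port_shift >= 0) {
		*ftuple |= (uint64_t)iport << port_shift;
		fmask |= M_FT_PORT << port_shift;
	}
	if (protocol_shift >= 0) {
		*ftuple |= (uint64_t)proto << protocol_shift;
		fmask |= M_FT_PROTOCOL << protocol_shift;
	}

	/* A hashfilter must cover exactly the configured filterMask. */
	if (fmask != hash_filter_mask)
		return (EINVAL);
	return (0);
}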
*/ if (is_t4(sc) && t->fs.action == FILTER_SWITCH && - (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE)) + (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE || + t->fs.swapmac || t->fs.nat_mode)) return (ENOTSUP); if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports) @@ -616,7 +703,7 @@ done: } if (t->fs.hash) - return (set_hashfilter(sc, t, l2te)); + return (set_hashfilter(sc, t, ftuple, l2te)); else return (set_tcamfilter(sc, t, l2te)); @@ -924,65 +1011,9 @@ done: return (0); } -static uint64_t -hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs) -{ - struct tp_params *tp = &sc->params.tp; - uint64_t ntuple = 0; - - /* - * Initialize each of the fields which we care about which are present - * in the Compressed Filter Tuple. - */ - if (tp->vlan_shift >= 0 && fs->mask.vlan) - ntuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift; - - if (tp->port_shift >= 0 && fs->mask.iport) - ntuple |= (uint64_t)fs->val.iport << tp->port_shift; - - if (tp->protocol_shift >= 0) { - if (!fs->val.proto) - ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift; - else - ntuple |= (uint64_t)fs->val.proto << tp->protocol_shift; - } - - if (tp->tos_shift >= 0 && fs->mask.tos) - ntuple |= (uint64_t)(fs->val.tos) << tp->tos_shift; - - if (tp->vnic_shift >= 0) { -#ifdef notyet - if (tp->ingress_config & F_VNIC && fs->mask.pfvf_vld) - ntuple |= (uint64_t)((fs->val.pfvf_vld << 16) | - (fs->val.pf << 13) | - (fs->val.vf)) << tp->vnic_shift; - else -#endif - ntuple |= (uint64_t)((fs->val.ovlan_vld << 16) | - (fs->val.vnic)) << tp->vnic_shift; - } - - if (tp->macmatch_shift >= 0 && fs->mask.macidx) - ntuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift; - - if (tp->ethertype_shift >= 0 && fs->mask.ethtype) - ntuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift; - - if (tp->matchtype_shift >= 0 && fs->mask.matchtype) - ntuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift; - - if (tp->frag_shift >= 0 && fs->mask.frag) - ntuple |= (uint64_t)(fs->val.frag) << tp->frag_shift; - - if (tp->fcoe_shift >= 0 && fs->mask.fcoe) - ntuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift; - - return (ntuple); -} - static void mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid, - struct cpl_act_open_req6 *cpl) + uint64_t ftuple, struct cpl_act_open_req6 *cpl) { struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl; struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl; @@ -1011,18 +1042,22 @@ mk_act_open_req6(struct adapter *sc, struct filter_ent cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) | V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) | - V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD); + V_NO_CONG(f->fs.rpttid) | + V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); - cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs))); + cpl6->params = htobe64(V_FILTER_TUPLE(ftuple)); cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) | - F_T5_OPT_2_VALID | F_RX_CHANNEL | + V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) | + V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 
1 : 0) | F_T5_OPT_2_VALID | + F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) | V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1))); } static void mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid, - struct cpl_act_open_req *cpl) + uint64_t ftuple, struct cpl_act_open_req *cpl) { struct cpl_t5_act_open_req *cpl5 = (void *)cpl; struct cpl_t6_act_open_req *cpl6 = (void *)cpl; @@ -1051,11 +1086,15 @@ mk_act_open_req(struct adapter *sc, struct filter_entr cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) | V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) | - V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD); + V_NO_CONG(f->fs.rpttid) | + V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); - cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs))); + cpl6->params = htobe64(V_FILTER_TUPLE(ftuple)); cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) | - F_T5_OPT_2_VALID | F_RX_CHANNEL | + V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) | + V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID | + F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) | V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1))); } @@ -1086,7 +1125,8 @@ act_open_cpl_len16(struct adapter *sc, int isipv6) } static int -set_hashfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te) +set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple, + struct l2t_entry *l2te) { void *wr; struct wrq_cookie cookie; @@ -1137,9 +1177,9 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t goto done; } if (f->fs.type) - mk_act_open_req6(sc, f, atid, wr); + mk_act_open_req6(sc, f, atid, ftuple, wr); else - mk_act_open_req(sc, f, atid, wr); + mk_act_open_req(sc, f, atid, ftuple, wr); f->locked = 1; /* ithread mustn't free f if ioctl is still around. */ f->pending = 1; @@ -1383,13 +1423,82 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t /* Set one of the t_flags bits in the TCB. 
*/ static inline int -set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val) +set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val, + u_int no_reply) { return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos, - (uint64_t)val << bit_pos, 1)); + (uint64_t)val << bit_pos, no_reply)); } +#define WORD_MASK 0xffffffff +static void +set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip, + const bool sip, const bool dp, const bool sp) +{ + + if (dip) { + if (f->fs.type) { + set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK, + f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 | + f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_SND_UNA_RAW + 1, WORD_MASK, + f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 | + f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_SND_UNA_RAW + 2, WORD_MASK, + f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 | + f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_SND_UNA_RAW + 3, WORD_MASK, + f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 | + f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1); + } else { + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK, + f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 | + f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1); + } + } + + if (sip) { + if (f->fs.type) { + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK, + f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 | + f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK, + f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 | + f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK, + f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 | + f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1); + + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK, + f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 | + f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1); + + } else { + set_tcb_field(sc, f->tid, + W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK, + f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 | + f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1); + } + } + + set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK, + (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1); +} + /* * Returns EINPROGRESS to indicate that at least one TCB update was sent and the * last of the series of updates requested a reply. 
The reply informs the @@ -1406,12 +1515,83 @@ configure_hashfilter_tcb(struct adapter *sc, struct fi MPASS(f->valid == 0); if (f->fs.newdmac) { - set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1); + set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1); updated++; } if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) { - set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1); + set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1); + updated++; + } + + if (f->fs.newsmac) { + set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1); + set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL, + V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smtidx), + 1); + updated++; + } + + switch(f->fs.nat_mode) { + case NAT_MODE_NONE: + break; + case NAT_MODE_DIP: + set_nat_params(sc, f, true, false, false, false); + updated++; + break; + case NAT_MODE_DIP_DP: + set_nat_params(sc, f, true, false, true, false); + updated++; + break; + case NAT_MODE_DIP_DP_SIP: + set_nat_params(sc, f, true, true, true, false); + updated++; + break; + case NAT_MODE_DIP_DP_SP: + set_nat_params(sc, f, true, false, true, true); + updated++; + break; + case NAT_MODE_SIP_SP: + set_nat_params(sc, f, false, true, false, true); + updated++; + break; + case NAT_MODE_DIP_SIP_SP: + set_nat_params(sc, f, true, true, false, true); + updated++; + break; + case NAT_MODE_ALL: + set_nat_params(sc, f, true, true, true, true); + updated++; + break; + default: + MPASS(0); /* should have been validated earlier */ + break; + + } + + if (f->fs.nat_seq_chk) { + set_tcb_field(sc, f->tid, W_TCB_RCV_NXT, + V_TCB_RCV_NXT(M_TCB_RCV_NXT), + V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1); + updated++; + } + + if (is_t5(sc) && f->fs.action == FILTER_DROP) { + /* + * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop. + */ + set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) | + V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1); + updated++; + } + + /* + * Enable switching after all secondary resources (L2T entry, SMT entry, + * etc.) are setup so that any switched packet will use correct + * values. + */ + if (f->fs.action == FILTER_SWITCH) { + set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1); updated++; } Modified: head/sys/dev/cxgbe/tom/t4_tom.c ============================================================================== --- head/sys/dev/cxgbe/tom/t4_tom.c Tue May 15 02:26:50 2018 (r333619) +++ head/sys/dev/cxgbe/tom/t4_tom.c Tue May 15 04:24:38 2018 (r333620) @@ -656,7 +656,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e) if (tp->protocol_shift >= 0) ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift; - if (tp->vnic_shift >= 0) { + if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) { uint32_t vf = G_FW_VIID_VIN(viid); uint32_t pf = G_FW_VIID_PFN(viid); uint32_t vld = G_FW_VIID_VIVLD(viid);
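The configure_hashfilter_tcb() changes in the t4_filter.c hunk above map each NAT mode onto the set of header fields that set_nat_params() rewrites in the filter's TCB.  The table below restates that switch for reference; the NAT_MODE_* ordering is assumed to match the driver's definitions, and the struct and table themselves are only an illustration.

#include <stdbool.h>

enum nat_mode {
	NAT_MODE_NONE = 0,	/* no rewrite */
	NAT_MODE_DIP,		/* destination IP */
	NAT_MODE_DIP_DP,	/* destination IP and port */
	NAT_MODE_DIP_DP_SIP,	/* dest IP and port, source IP */
	NAT_MODE_DIP_DP_SP,	/* dest IP and port, source port */
	NAT_MODE_SIP_SP,	/* source IP and port */
	NAT_MODE_DIP_SIP_SP,	/* dest IP, source IP and port */
	NAT_MODE_ALL		/* all four */
};

struct nat_rewrite {
	bool dip, sip, dport, sport;	/* arguments to set_nat_params() */
};

static const struct nat_rewrite nat_rewrites[] = {
	[NAT_MODE_NONE]		= { false, false, false, false },
	[NAT_MODE_DIP]		= { true,  false, false, false },
	[NAT_MODE_DIP_DP]	= { true,  false, true,  false },
	[NAT_MODE_DIP_DP_SIP]	= { true,  true,  true,  false },
	[NAT_MODE_DIP_DP_SP]	= { true,  false, true,  true  },
	[NAT_MODE_SIP_SP]	= { false, true,  false, true  },
	[NAT_MODE_DIP_SIP_SP]	= { true,  true,  false, true  },
	[NAT_MODE_ALL]		= { true,  true,  true,  true  },
};

For example, NAT_MODE_DIP_DP rewrites the destination IP and destination port and leaves the source address and port alone, which corresponds to set_nat_params(sc, f, true, false, true, false) in the diff.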