Date: Mon, 7 May 2018 21:42:23 +0000 (UTC)
From: Stephen Hurd <shurd@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r333338 - in stable/11/sys: dev/bnxt kern net sys
Message-ID: <201805072142.w47LgN1R041002@repo.freebsd.org>
Author: shurd
Date: Mon May 7 21:42:22 2018
New Revision: 333338
URL: https://svnweb.freebsd.org/changeset/base/333338

Log:
  Merge iflib changes to 11-STABLE

  MFC r300147, r300153, r300154, r300215, r301563, r301567, r302372,
  r307560, r307562, r307563, r307568, r308792, r311039, r311837, r312755,
  r312903, r312905, r312924, r313248, r315217, r315245, r315288, r316278,
  r316281, r316502, r316596, r317756, r319917, r319921, r319984, r319989,
  r320059, r320609, r320611, r321253, r321629, r321630, r322337, r322338,
  r322823, r323077, r323825, r323876, r323879, r323887, r323941, r323942,
  r323943, r323944, r323954, r324038, r324318, r324937, r325166, r325167,
  r325168, r325201, r325241, r325245, r325487, r325494, r325901, r326033,
  r326369, r326370, r326432, r326577, r326578, r326702, r326706, r326775,
  r327013, r327017, r327052, r327072, r327098, r327242, r327244, r327247,
  r329651, r329742, r330289, r330715, r330721, r332419, r332422, r332729

  Reviewed by:	sbruno
  Approved by:	re (delphij@)
  Sponsored by:	Limelight Networks
  Differential Revision:	https://reviews.freebsd.org/D15142

Modified:
  stable/11/sys/dev/bnxt/bnxt.h
  stable/11/sys/dev/bnxt/bnxt_hwrm.c
  stable/11/sys/dev/bnxt/bnxt_txrx.c
  stable/11/sys/dev/bnxt/if_bnxt.c
  stable/11/sys/kern/kern_cpuset.c
  stable/11/sys/kern/kern_intr.c
  stable/11/sys/kern/subr_gtaskqueue.c
  stable/11/sys/net/ifdi_if.m
  stable/11/sys/net/iflib.c
  stable/11/sys/net/iflib.h
  stable/11/sys/net/mp_ring.c
  stable/11/sys/sys/_task.h
  stable/11/sys/sys/cpuset.h
  stable/11/sys/sys/interrupt.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/dev/bnxt/bnxt.h
==============================================================================
--- stable/11/sys/dev/bnxt/bnxt.h	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/dev/bnxt/bnxt.h	Mon May 7 21:42:22 2018	(r333338)
@@ -203,6 +203,8 @@ __FBSDID("$FreeBSD$");
 /* Chip info */
 #define BNXT_TSO_SIZE		UINT16_MAX
 
+#define BNXT_MIN_FRAME_SIZE	52	/* Frames must be padded to this size for some A0 chips */
+
 /* NVRAM access */
 enum bnxt_nvm_directory_type {
 	BNX_DIR_TYPE_UNUSED = 0,
@@ -427,6 +429,7 @@ struct bnxt_ring {
 	uint32_t		ring_size;	/* Must be a power of two */
 	uint16_t		id;		/* Logical ID */
 	uint16_t		phys_id;
+	struct bnxt_full_tpa_start *tpa_start;
 };
 
 struct bnxt_cp_ring {
@@ -552,7 +555,6 @@ struct bnxt_softc {
 	struct sysctl_ctx_list	hw_stats;
 	struct sysctl_oid	*hw_stats_oid;
 
-	struct bnxt_full_tpa_start *tpa_start;
 	struct bnxt_ver_info	*ver_info;
 	struct bnxt_nvram_info	*nvm_info;
 };

Modified: stable/11/sys/dev/bnxt/bnxt_hwrm.c
==============================================================================
--- stable/11/sys/dev/bnxt/bnxt_hwrm.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/dev/bnxt/bnxt_hwrm.c	Mon May 7 21:42:22 2018	(r333338)
@@ -931,7 +931,7 @@ bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc, struc
 	/* TODO: Calculate this based on ring size? */
 	req.max_agg_segs = htole16(3);
 	/* Base this in the allocated TPA start size... */
-	req.max_aggs = htole16(2);
+	req.max_aggs = htole16(7);
 	/*
 	 * TODO: max_agg_timer?
 	 * req.mag_agg_timer = htole32(XXX);

Modified: stable/11/sys/dev/bnxt/bnxt_txrx.c
==============================================================================
--- stable/11/sys/dev/bnxt/bnxt_txrx.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/dev/bnxt/bnxt_txrx.c	Mon May 7 21:42:22 2018	(r333338)
@@ -48,17 +48,19 @@ __FBSDID("$FreeBSD$");
  */
 
 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
-static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx);
-static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t cidx,
-    bool clear);
+static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
+static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
 
-static void bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
+static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
+
+/*				uint16_t rxqid, uint8_t flid,
 				   uint32_t pidx, uint64_t *paddrs,
 				   caddr_t *vaddrs, uint16_t count, uint16_t buf_size);
+*/
 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
-    uint32_t pidx);
-static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx,
-    int budget);
+    qidx_t pidx);
+static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
+    qidx_t budget);
 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
 
 static int bnxt_intr(void *sc);
@@ -172,7 +174,7 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
 }
 
 static void
-bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx)
+bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
@@ -185,7 +187,7 @@ bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t
 }
 
 static int
-bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t idx, bool clear)
+bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
@@ -249,16 +251,30 @@ done:
 }
 
 static void
-bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
-		    uint32_t pidx, uint64_t *paddrs,
-		    caddr_t *vaddrs, uint16_t count, uint16_t len)
+bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_ring *rx_ring;
 	struct rx_prod_pkt_bd *rxbd;
 	uint16_t type;
 	uint16_t i;
+	uint16_t rxqid;
+	uint16_t count, len;
+	uint32_t pidx;
+	uint8_t flid;
+	uint64_t *paddrs;
+	caddr_t *vaddrs;
+	qidx_t *frag_idxs;
 
+	rxqid = iru->iru_qsidx;
+	count = iru->iru_count;
+	len = iru->iru_buf_size;
+	pidx = iru->iru_pidx;
+	flid = iru->iru_flidx;
+	vaddrs = iru->iru_vaddrs;
+	paddrs = iru->iru_paddrs;
+	frag_idxs = iru->iru_idxs;
+
 	if (flid == 0) {
 		rx_ring = &softc->rx_rings[rxqid];
 		type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
@@ -273,8 +289,8 @@ bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t
 		rxbd[pidx].flags_type = htole16(type);
 		rxbd[pidx].len = htole16(len);
 		/* No need to byte-swap the opaque value */
-		rxbd[pidx].opaque = ((rxqid & 0xff) << 24) | (flid << 16)
-		    | pidx;
+		rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
+		    | (frag_idxs[i]));
 		rxbd[pidx].addr = htole64(paddrs[i]);
 		if (++pidx == rx_ring->ring_size)
 			pidx = 0;
@@ -284,7 +300,7 @@ bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t
 
 static void
 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
-    uint32_t pidx)
+    qidx_t pidx)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_ring *rx_ring;
@@ -310,12 +326,11 @@ bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t f
 }
 
 static int
-bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx, int budget)
+bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
 	struct rx_pkt_cmpl *rcp;
-	struct rx_tpa_start_cmpl *rtpa;
 	struct rx_tpa_end_cmpl *rtpae;
 	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
 	int avail = 0;
@@ -324,7 +339,6 @@ bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint3
 	uint8_t ags;
 	int i;
 	uint16_t type;
-	uint8_t agg_id;
 
 	for (;;) {
 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
@@ -374,18 +388,11 @@ bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint3
 			avail++;
 			break;
 		case CMPL_BASE_TYPE_RX_TPA_START:
-			rtpa = (void *)&cmp[cons];
-			agg_id = (rtpa->agg_id &
-			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
-			    RX_TPA_START_CMPL_AGG_ID_SFT;
-			softc->tpa_start[agg_id].low = *rtpa;
 			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
 			CMPL_PREFETCH_NEXT(cpr, cons);
 			if (!CMP_VALID(&cmp[cons], v_bit))
 				goto cmpl_invalid;
-			softc->tpa_start[agg_id].high =
-			    ((struct rx_tpa_start_cmpl_hi *)cmp)[cons];
 			break;
 		case CMPL_BASE_TYPE_RX_AGG:
 			break;
@@ -510,7 +517,7 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info
 	/* Get the agg_id */
 	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
 	    RX_TPA_END_CMPL_AGG_ID_SFT;
-	tpas = &softc->tpa_start[agg_id];
+	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);
 
 	/* Extract from the first 16-byte BD */
 	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
@@ -530,8 +537,8 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info
 	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
 	ri->iri_nfrags = ags + 1;
 	/* No need to byte-swap the opaque value */
-	ri->iri_frags[0].irf_flid = (tpas->low.opaque >> 16) & 0xff;
-	ri->iri_frags[0].irf_idx = tpas->low.opaque & 0xffff;
+	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
+	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
 	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
 	ri->iri_len = le16toh(tpas->low.len);
 
@@ -567,8 +574,8 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info
 		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
 
 		/* No need to byte-swap the opaque value */
-		ri->iri_frags[i].irf_flid = (acp->opaque >> 16) & 0xff;
-		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
+		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
+		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
 		ri->iri_frags[i].irf_len = le16toh(acp->len);
 		ri->iri_len += le16toh(acp->len);
 	}
@@ -576,8 +583,8 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info
 
 	/* And finally, the empty BD at the end... */
 	ri->iri_nfrags++;
 	/* No need to byte-swap the opaque value */
-	ri->iri_frags[i].irf_flid = (agend->opaque >> 16) % 0xff;
-	ri->iri_frags[i].irf_idx = agend->opaque & 0xffff;
+	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
+	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
 	ri->iri_frags[i].irf_len = le16toh(agend->len);
 	ri->iri_len += le16toh(agend->len);
 
@@ -590,9 +597,12 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
 {
 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
 	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
+	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
 	struct cmpl_base *cmp;
+	struct rx_tpa_start_cmpl *rtpa;
 	uint16_t flags_type;
 	uint16_t type;
+	uint8_t agg_id;
 
 	for (;;) {
 		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
@@ -609,9 +619,18 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
 	case CMPL_BASE_TYPE_RX_TPA_END:
 		return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
 	case CMPL_BASE_TYPE_RX_TPA_START:
+		rtpa = (void *)&cmp_q[cpr->cons];
+		agg_id = (rtpa->agg_id &
+		    RX_TPA_START_CMPL_AGG_ID_MASK) >>
+		    RX_TPA_START_CMPL_AGG_ID_SFT;
+		softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;
+
 		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
 		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
 		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
+
+		softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
+		    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
 		break;
 	default:
 		device_printf(softc->dev,

Modified: stable/11/sys/dev/bnxt/if_bnxt.c
==============================================================================
--- stable/11/sys/dev/bnxt/if_bnxt.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/dev/bnxt/if_bnxt.c	Mon May 7 21:42:22 2018	(r333338)
@@ -235,6 +235,8 @@ MODULE_DEPEND(bnxt, pci, 1, 1, 1);
 MODULE_DEPEND(bnxt, ether, 1, 1, 1);
 MODULE_DEPEND(bnxt, iflib, 1, 1, 1);
 
+IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
+
 static device_method_t bnxt_iflib_methods[] = {
 	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
 	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
@@ -255,7 +257,8 @@ static device_method_t bnxt_iflib_methods[] = {
 	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
 
 	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
-	DEVMETHOD(ifdi_queue_intr_enable, bnxt_queue_intr_enable),
+	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_queue_intr_enable),
+	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_queue_intr_enable),
 	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
 	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),
 
@@ -279,10 +282,9 @@ char bnxt_driver_version[] = "FreeBSD base";
 extern struct if_txrx bnxt_txrx;
 static struct if_shared_ctx bnxt_sctx_init = {
 	.isc_magic = IFLIB_MAGIC,
-	.isc_txrx = &bnxt_txrx,
 	.isc_driver = &bnxt_iflib_driver,
 	.isc_nfl = 2,				// Number of Free Lists
-	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,
+	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
 	.isc_q_align = PAGE_SIZE,
 	.isc_tx_maxsize = BNXT_TSO_SIZE,
 	.isc_tx_maxsegsize = BNXT_TSO_SIZE,
@@ -494,6 +496,17 @@ bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
 		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
 		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
 
+		/* Allocate the TPA start buffer */
+		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
+		    (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
+		    M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (softc->rx_rings[i].tpa_start == NULL) {
+			rc = -ENOMEM;
+			device_printf(softc->dev,
+			    "Unable to allocate space for TPA\n");
+			goto tpa_alloc_fail;
+		}
+
 		/* Allocate the AG ring */
 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
 		softc->ag_rings[i].softc = softc;
@@ -559,7 +572,10 @@ rss_grp_alloc_fail:
 	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
 rss_hash_alloc_fail:
 	iflib_dma_free(&softc->vnic_info.mc_list);
+tpa_alloc_fail:
 mc_list_alloc_fail:
+	for (i = i - 1; i >= 0; i--)
+		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
 	iflib_dma_free(&softc->rx_stats);
 hw_stats_alloc_fail:
 	free(softc->grp_info, M_DEVBUF);
@@ -623,16 +639,6 @@ bnxt_attach_pre(if_ctx_t ctx)
 	if (rc)
 		goto dma_fail;
 
-	/* Allocate the TPA start buffer */
-	softc->tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
-	    (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
-	    M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (softc->tpa_start == NULL) {
-		rc = ENOMEM;
-		device_printf(softc->dev,
-		    "Unable to allocate space for TPA\n");
-		goto tpa_failed;
-	}
 
 	/* Get firmware version and compare with driver */
 	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
@@ -681,6 +687,20 @@ bnxt_attach_pre(if_ctx_t ctx)
 		goto failed;
 	iflib_set_mac(ctx, softc->func.mac_addr);
 
+	scctx->isc_txrx = &bnxt_txrx;
+	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
+	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
+	scctx->isc_capenable =
+	    /* These are translated to hwassit bits */
+	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
+	    /* These are checked by iflib */
+	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
+	    /* These are part of the iflib mask */
+	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
+	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
+	    /* These likely get lost... */
+	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
+
 	/* Get the queue config */
 	rc = bnxt_hwrm_queue_qportcfg(softc);
 	if (rc) {
@@ -700,6 +720,9 @@ bnxt_attach_pre(if_ctx_t ctx)
 	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
 	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
 	scctx->isc_vectors = softc->func.max_cp_rings;
+	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
+	scctx->isc_txrx = &bnxt_txrx;
+
 	if (scctx->isc_nrxd[0] < ((scctx->isc_nrxd[1] * 4) +
 	    scctx->isc_nrxd[2]))
 		device_printf(softc->dev,
@@ -717,12 +740,12 @@ bnxt_attach_pre(if_ctx_t ctx)
 	    scctx->isc_nrxd[1];
 	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
 	    scctx->isc_nrxd[2];
-	scctx->isc_max_rxqsets = min(pci_msix_count(softc->dev)-1,
+	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
 	    softc->func.max_cp_rings - 1);
-	scctx->isc_max_rxqsets = min(scctx->isc_max_rxqsets,
+	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
 	    softc->func.max_rx_rings);
-	scctx->isc_max_txqsets = min(softc->func.max_rx_rings,
-	    softc->func.max_cp_rings - scctx->isc_max_rxqsets - 1);
+	scctx->isc_ntxqsets_max = min(softc->func.max_rx_rings,
+	    softc->func.max_cp_rings - scctx->isc_nrxqsets_max - 1);
 	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
 
@@ -780,8 +803,6 @@ nvm_alloc_fail:
 ver_fail:
 	free(softc->ver_info, M_DEVBUF);
 ver_alloc_fail:
-	free(softc->tpa_start, M_DEVBUF);
-tpa_failed:
 	bnxt_free_hwrm_dma_mem(softc);
 dma_fail:
 	BNXT_HWRM_LOCK_DESTROY(softc);
@@ -795,7 +816,6 @@ bnxt_attach_post(if_ctx_t ctx)
 {
 	struct bnxt_softc *softc = iflib_get_softc(ctx);
 	if_t ifp = iflib_get_ifp(ctx);
-	int capabilities, enabling;
 	int rc;
 
 	bnxt_create_config_sysctls_post(softc);
@@ -810,26 +830,6 @@ bnxt_attach_post(if_ctx_t ctx)
 	bnxt_add_media_types(softc);
 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
 
-	if_sethwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 |
-	    CSUM_UDP_IPV6 | CSUM_TSO));
-
-	capabilities =
-	    /* These are translated to hwassit bits */
-	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
-	    /* These are checked by iflib */
-	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
-	    /* These are part of the iflib mask */
-	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
-	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
-	    /* These likely get lost... */
-	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
-
-	if_setcapabilities(ifp, capabilities);
-
-	enabling = capabilities;
-
-	if_setcapenable(ifp, enabling);
-
 	softc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
 	    ETHER_CRC_LEN;
 
@@ -863,7 +863,8 @@ bnxt_detach(if_ctx_t ctx)
 	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
 		free(tag, M_DEVBUF);
 	iflib_dma_free(&softc->def_cp_ring_mem);
-	free(softc->tpa_start, M_DEVBUF);
+	for (i = 0; i < softc->nrxqsets; i++)
+		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
 	free(softc->ver_info, M_DEVBUF);
 	free(softc->nvm_info, M_DEVBUF);
 
@@ -995,14 +996,17 @@ bnxt_init(if_ctx_t ctx)
 	if (rc)
 		goto fail;
 
-#ifdef notyet
-	/* Enable LRO/TPA/GRO */
+	/*
+	 * Enable LRO/TPA/GRO
+	 * TBD:
+	 *	Enable / Disable HW_LRO based on
+	 *	ifconfig lro / ifconfig -lro setting
+	 */
 	rc = bnxt_hwrm_vnic_tpa_cfg(softc, &softc->vnic_info,
 	    (if_getcapenable(iflib_get_ifp(ctx)) & IFCAP_LRO) ?
 	    HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA : 0);
 	if (rc)
 		goto fail;
-#endif
 
 	for (i = 0; i < softc->ntxqsets; i++) {
 		/* Allocate the statistics context */
@@ -1489,7 +1493,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
 
 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
 		rc = iflib_irq_alloc_generic(ctx, &softc->rx_cp_rings[i].irq,
-		    softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RX,
+		    softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RXTX,
 		    bnxt_handle_rx_cp, &softc->rx_cp_rings[i], i, "rx_cp");
 		if (rc) {
 			device_printf(iflib_get_dev(ctx),
@@ -1500,8 +1504,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
 	}
 
 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
-		iflib_softirq_alloc_generic(ctx, i + 1, IFLIB_INTR_TX, NULL, i,
-		    "tx_cp");
+		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
 
 	return rc;

Modified: stable/11/sys/kern/kern_cpuset.c
==============================================================================
--- stable/11/sys/kern/kern_cpuset.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/kern/kern_cpuset.c	Mon May 7 21:42:22 2018	(r333338)
@@ -1127,6 +1127,8 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t
 	case CPU_WHICH_JAIL:
 		break;
 	case CPU_WHICH_IRQ:
+	case CPU_WHICH_INTRHANDLER:
+	case CPU_WHICH_ITHREAD:
 	case CPU_WHICH_DOMAIN:
 		error = EINVAL;
 		goto out;
@@ -1157,7 +1159,9 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t
 		CPU_COPY(&set->cs_mask, mask);
 		break;
 	case CPU_WHICH_IRQ:
-		error = intr_getaffinity(id, mask);
+	case CPU_WHICH_INTRHANDLER:
+	case CPU_WHICH_ITHREAD:
+		error = intr_getaffinity(id, which, mask);
 		break;
 	case CPU_WHICH_DOMAIN:
 		if (id < 0 || id >= MAXMEMDOM)
@@ -1260,6 +1264,8 @@ kern_cpuset_setaffinity(struct thread *td, cpulevel_t
 	case CPU_WHICH_JAIL:
 		break;
 	case CPU_WHICH_IRQ:
+	case CPU_WHICH_INTRHANDLER:
+	case CPU_WHICH_ITHREAD:
 	case CPU_WHICH_DOMAIN:
 		error = EINVAL;
 		goto out;
@@ -1289,7 +1295,9 @@ kern_cpuset_setaffinity(struct thread *td, cpulevel_t
 		}
 		break;
 	case CPU_WHICH_IRQ:
-		error = intr_setaffinity(id, mask);
+	case CPU_WHICH_INTRHANDLER:
+	case CPU_WHICH_ITHREAD:
+		error = intr_setaffinity(id, which, mask);
 		break;
 	default:
 		error = EINVAL;

Modified: stable/11/sys/kern/kern_intr.c
==============================================================================
--- stable/11/sys/kern/kern_intr.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/kern/kern_intr.c	Mon May 7 21:42:22 2018	(r333338)
@@ -287,13 +287,11 @@ intr_event_create(struct intr_event **event, void *sou
 /*
  * Bind an interrupt event to the specified CPU. Note that not all
  * platforms support binding an interrupt to a CPU. For those
- * platforms this request will fail. For supported platforms, any
- * associated ithreads as well as the primary interrupt context will
- * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds
+ * platforms this request will fail. Using a cpu id of NOCPU unbinds
  * the interrupt event.
  */
-int
-intr_event_bind(struct intr_event *ie, int cpu)
+static int
+_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
 {
 	lwpid_t id;
 	int error;
@@ -313,35 +311,75 @@ intr_event_bind(struct intr_event *ie, int cpu)
 	 * If we have any ithreads try to set their mask first to verify
 	 * permissions, etc.
 	 */
-	mtx_lock(&ie->ie_lock);
-	if (ie->ie_thread != NULL) {
-		id = ie->ie_thread->it_thread->td_tid;
-		mtx_unlock(&ie->ie_lock);
-		error = cpuset_setithread(id, cpu);
-		if (error)
-			return (error);
-	} else
-		mtx_unlock(&ie->ie_lock);
-	error = ie->ie_assign_cpu(ie->ie_source, cpu);
-	if (error) {
+	if (bindithread) {
 		mtx_lock(&ie->ie_lock);
 		if (ie->ie_thread != NULL) {
-			cpu = ie->ie_cpu;
 			id = ie->ie_thread->it_thread->td_tid;
 			mtx_unlock(&ie->ie_lock);
-			(void)cpuset_setithread(id, cpu);
+			error = cpuset_setithread(id, cpu);
+			if (error)
+				return (error);
 		} else
 			mtx_unlock(&ie->ie_lock);
+	}
+	if (bindirq)
+		error = ie->ie_assign_cpu(ie->ie_source, cpu);
+	if (error) {
+		if (bindithread) {
+			mtx_lock(&ie->ie_lock);
+			if (ie->ie_thread != NULL) {
+				cpu = ie->ie_cpu;
+				id = ie->ie_thread->it_thread->td_tid;
+				mtx_unlock(&ie->ie_lock);
+				(void)cpuset_setithread(id, cpu);
+			} else
+				mtx_unlock(&ie->ie_lock);
+		}
 		return (error);
 	}
-	mtx_lock(&ie->ie_lock);
-	ie->ie_cpu = cpu;
-	mtx_unlock(&ie->ie_lock);
+	if (bindirq) {
+		mtx_lock(&ie->ie_lock);
+		ie->ie_cpu = cpu;
+		mtx_unlock(&ie->ie_lock);
+	}
 	return (error);
 }
 
+/*
+ * Bind an interrupt event to the specified CPU. For supported platforms, any
+ * associated ithreads as well as the primary interrupt context will be bound
+ * to the specificed CPU.
+ */
+int
+intr_event_bind(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, true, true));
+}
+
+/*
+ * Bind an interrupt event to the specified CPU, but do not bind associated
+ * ithreads.
+ */
+int
+intr_event_bind_irqonly(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, true, false));
+}
+
+/*
+ * Bind an interrupt event's ithread to the specified CPU.
+ */
+int
+intr_event_bind_ithread(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, false, true));
+}
+
 static struct intr_event *
 intr_lookup(int irq)
 {
@@ -358,7 +396,7 @@ intr_lookup(int irq)
 }
 
 int
-intr_setaffinity(int irq, void *m)
+intr_setaffinity(int irq, int mode, void *m)
 {
 	struct intr_event *ie;
 	cpuset_t *mask;
@@ -382,26 +420,62 @@ intr_setaffinity(int irq, void *m)
 	ie = intr_lookup(irq);
 	if (ie == NULL)
 		return (ESRCH);
-	return (intr_event_bind(ie, cpu));
+	switch (mode) {
+	case CPU_WHICH_IRQ:
+		return (intr_event_bind(ie, cpu));
+	case CPU_WHICH_INTRHANDLER:
+		return (intr_event_bind_irqonly(ie, cpu));
+	case CPU_WHICH_ITHREAD:
+		return (intr_event_bind_ithread(ie, cpu));
+	default:
+		return (EINVAL);
+	}
 }
 
 int
-intr_getaffinity(int irq, void *m)
+intr_getaffinity(int irq, int mode, void *m)
 {
 	struct intr_event *ie;
+	struct thread *td;
+	struct proc *p;
 	cpuset_t *mask;
+	lwpid_t id;
+	int error;
 
 	mask = m;
 	ie = intr_lookup(irq);
 	if (ie == NULL)
 		return (ESRCH);
+
+	error = 0;
 	CPU_ZERO(mask);
-	mtx_lock(&ie->ie_lock);
-	if (ie->ie_cpu == NOCPU)
-		CPU_COPY(cpuset_root, mask);
-	else
-		CPU_SET(ie->ie_cpu, mask);
-	mtx_unlock(&ie->ie_lock);
+	switch (mode) {
+	case CPU_WHICH_IRQ:
+	case CPU_WHICH_INTRHANDLER:
+		mtx_lock(&ie->ie_lock);
+		if (ie->ie_cpu == NOCPU)
+			CPU_COPY(cpuset_root, mask);
+		else
+			CPU_SET(ie->ie_cpu, mask);
+		mtx_unlock(&ie->ie_lock);
+		break;
+	case CPU_WHICH_ITHREAD:
+		mtx_lock(&ie->ie_lock);
+		if (ie->ie_thread == NULL) {
+			mtx_unlock(&ie->ie_lock);
+			CPU_COPY(cpuset_root, mask);
+		} else {
+			id = ie->ie_thread->it_thread->td_tid;
+			mtx_unlock(&ie->ie_lock);
+			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
+			if (error != 0)
+				return (error);
+			CPU_COPY(&td->td_cpuset->cs_mask, mask);
+			PROC_UNLOCK(p);
+		}
+	default:
+		return (EINVAL);
+	}
 	return (0);
 }

Modified: stable/11/sys/kern/subr_gtaskqueue.c
==============================================================================
--- stable/11/sys/kern/subr_gtaskqueue.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/kern/subr_gtaskqueue.c	Mon May 7 21:42:22 2018	(r333338)
@@ -48,7 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/unistd.h>
 #include <machine/stdarg.h>
 
-static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues");
+static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
 static void	gtaskqueue_thread_enqueue(void *);
 static void	gtaskqueue_thread_loop(void *arg);
@@ -134,8 +134,10 @@ _gtaskqueue_create(const char *name, int mflags,
 	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");
 
 	queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
-	if (!queue)
+	if (!queue) {
+		free(tq_name, M_GTASKQUEUE);
 		return (NULL);
+	}
 
 	STAILQ_INIT(&queue->tq_queue);
 	TAILQ_INIT(&queue->tq_active);
@@ -663,10 +665,10 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct gr
     void *uniq, int irq, char *name)
 {
 	cpuset_t mask;
-	int qid;
+	int qid, error;
 
 	gtask->gt_uniq = uniq;
-	gtask->gt_name = name;
+	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
 	gtask->gt_irq = irq;
 	gtask->gt_cpu = -1;
 	mtx_lock(&qgroup->tqg_lock);
@@ -679,7 +681,9 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct gr
 		CPU_ZERO(&mask);
 		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
 		mtx_unlock(&qgroup->tqg_lock);
-		intr_setaffinity(irq, &mask);
+		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+		if (error)
+			printf("%s: setaffinity failed for %s: %d\n", __func__, gtask->gt_name, error);
 	} else
 		mtx_unlock(&qgroup->tqg_lock);
 }
@@ -688,7 +692,7 @@ static void
 taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
 {
 	cpuset_t mask;
-	int qid, cpu;
+	int qid, cpu, error;
 
 	mtx_lock(&qgroup->tqg_lock);
 	qid = taskqgroup_find(qgroup, gtask->gt_uniq);
@@ -698,9 +702,11 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup,
 		CPU_ZERO(&mask);
 		CPU_SET(cpu, &mask);
-		intr_setaffinity(gtask->gt_irq, &mask);
-
+		error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
 		mtx_lock(&qgroup->tqg_lock);
+		if (error)
+			printf("%s: %s setaffinity failed: %d\n", __func__, gtask->gt_name, error);
+
 	}
 	qgroup->tqg_queue[qid].tgc_cnt++;
@@ -716,11 +722,11 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
     void *uniq, int cpu, int irq, char *name)
 {
 	cpuset_t mask;
-	int i, qid;
+	int i, qid, error;
 
 	qid = -1;
 	gtask->gt_uniq = uniq;
-	gtask->gt_name = name;
+	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
 	gtask->gt_irq = irq;
 	gtask->gt_cpu = cpu;
 	mtx_lock(&qgroup->tqg_lock);
@@ -732,6 +738,7 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
 		}
 		if (qid == -1) {
 			mtx_unlock(&qgroup->tqg_lock);
+			printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
 			return (EINVAL);
 		}
 	} else
@@ -744,8 +751,11 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
 	CPU_ZERO(&mask);
 	CPU_SET(cpu, &mask);
-	if (irq != -1 && tqg_smp_started)
-		intr_setaffinity(irq, &mask);
+	if (irq != -1 && tqg_smp_started) {
+		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+		if (error)
+			printf("%s: setaffinity failed: %d\n", __func__, error);
+	}
 	return (0);
 }
@@ -753,7 +763,7 @@ static int
 taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
 {
 	cpuset_t mask;
-	int i, qid, irq, cpu;
+	int i, qid, irq, cpu, error;
 
 	qid = -1;
 	irq = gtask->gt_irq;
@@ -767,6 +777,7 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgro
 	}
 	if (qid == -1) {
 		mtx_unlock(&qgroup->tqg_lock);
+		printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
 		return (EINVAL);
 	}
 	qgroup->tqg_queue[qid].tgc_cnt++;
@@ -778,8 +789,11 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgro
 	CPU_ZERO(&mask);
 	CPU_SET(cpu, &mask);
-	if (irq != -1)
-		intr_setaffinity(irq, &mask);
+	if (irq != -1) {
+		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+		if (error)
+			printf("%s: setaffinity failed: %d\n", __func__, error);
+	}
 	return (0);
 }
@@ -793,7 +807,7 @@ taskqgroup_detach(struct taskqgroup *qgroup, struct gr
 		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
 			break;
 	if (i == qgroup->tqg_cnt)
-		panic("taskqgroup_detach: task not in group\n");
+		panic("taskqgroup_detach: task %s not in group\n", gtask->gt_name);
 	qgroup->tqg_queue[i].tgc_cnt--;
 	LIST_REMOVE(gtask, gt_list);
 	mtx_unlock(&qgroup->tqg_lock);
@@ -815,7 +829,7 @@ taskqgroup_binder(void *ctx)
 	thread_unlock(curthread);
 
 	if (error)
-		printf("taskqgroup_binder: setaffinity failed: %d\n",
+		printf("%s: setaffinity failed: %d\n", __func__,
 		    error);
 	free(gtask, M_DEVBUF);
 }
@@ -858,7 +872,7 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
 		return (EINVAL);
 	}
 	if (qgroup->tqg_adjusting) {
-		printf("taskqgroup_adjust failed: adjusting\n");
+		printf("%s failed: adjusting\n", __func__);
 		return (EBUSY);
 	}
 	qgroup->tqg_adjusting = 1;

Modified: stable/11/sys/net/ifdi_if.m
==============================================================================
--- stable/11/sys/net/ifdi_if.m	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/net/ifdi_if.m	Mon May 7 21:42:22 2018	(r333338)
@@ -195,11 +195,16 @@ METHOD void intr_disable {
 	if_ctx_t _ctx;
 };
 
-METHOD int queue_intr_enable {
+METHOD int rx_queue_intr_enable {
 	if_ctx_t _ctx;
 	uint16_t _qid;
 } DEFAULT null_queue_intr_enable;
 
+METHOD int tx_queue_intr_enable {
+	if_ctx_t _ctx;
+	uint16_t _qid;
+} DEFAULT null_queue_intr_enable;
+
 METHOD void link_intr_enable {
 	if_ctx_t _ctx;
 } DEFAULT null_void_op;
@@ -229,6 +234,7 @@ METHOD int promisc_set {
 METHOD void crcstrip_set {
 	if_ctx_t _ctx;
 	int _onoff;
+	int _strip;
 };
 
 #
@@ -332,4 +338,6 @@ METHOD int sysctl_int_delay {
 	if_int_delay_info_t _iidi;
 } DEFAULT null_sysctl_int_delay;
 
-
+METHOD void debug {
+	if_ctx_t _ctx;
+} DEFAULT null_void_op;

Modified: stable/11/sys/net/iflib.c
==============================================================================
--- stable/11/sys/net/iflib.c	Mon May 7 21:32:08 2018	(r333337)
+++ stable/11/sys/net/iflib.c	Mon May 7 21:42:22 2018	(r333338)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2014-2016, Matthew Macy <mmacy@nextbsd.org>
+ * Copyright (c) 2014-2017, Matthew Macy <mmacy@nextbsd.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@ __FBSDID("$FreeBSD$");
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_acpi.h"
+#include "opt_sched.h"
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -59,6 +60,7 @@ __FBSDID("$FreeBSD$");
 #include <net/bpf.h>
 #include <net/ethernet.h>
 #include <net/mp_ring.h>
+#include <net/vnet.h>
 
 #include <netinet/in.h>
 #include <netinet/in_pcb.h>
@@ -68,6 +70,8 @@ __FBSDID("$FreeBSD$");
 #include <netinet/ip.h>
 #include <netinet/ip6.h>
 #include <netinet/tcp.h>
+#include <netinet/ip_var.h>
+#include <netinet6/ip6_var.h>
 
 #include <machine/bus.h>
 #include <machine/in_cksum.h>
@@ -93,9 +97,10 @@ __FBSDID("$FreeBSD$");
 #include <x86/iommu/busdma_dmar.h>
 #endif
 
-
+#include <sys/bitstring.h>
 /*
- * enable accounting of every mbuf as it comes in to and goes out of iflib's software descriptor references
+ * enable accounting of every mbuf as it comes in to and goes out of
+ * iflib's software descriptor references
 */
 #define MEMORY_LOGGING 0
 /*
@@ -134,10 +139,13 @@ typedef struct iflib_rxq *iflib_rxq_t;
 struct iflib_fl;
 typedef struct iflib_fl *iflib_fl_t;
 
+static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
+
 typedef struct iflib_filter_info {
 	driver_filter_t *ifi_filter;
 	void *ifi_filter_arg;
 	struct grouptask *ifi_task;
+	void *ifi_ctx;
 } *iflib_filter_info_t;
 
 struct iflib_ctx {
@@ -156,7 +164,6 @@ struct iflib_ctx {
 	struct mtx ifc_mtx;
 
 	uint16_t ifc_nhwtxqs;
-	uint16_t ifc_nhwrxqs;
 
 	iflib_txq_t ifc_txqs;
 	iflib_rxq_t ifc_rxqs;
@@ -167,7 +174,6 @@ struct iflib_ctx {
 
 	int ifc_link_state;
 	int ifc_link_irq;
-	int ifc_pause_frames;
 	int ifc_watchdog_events;
 	struct cdev *ifc_led_dev;
 	struct resource *ifc_msix_mem;
@@ -182,9 +188,10 @@ struct iflib_ctx {
 	uint16_t ifc_sysctl_ntxqs;
 	uint16_t ifc_sysctl_nrxqs;
 	uint16_t ifc_sysctl_qs_eq_override;
+	uint16_t ifc_sysctl_rx_budget;
 
-	uint16_t ifc_sysctl_ntxds[8];
-	uint16_t ifc_sysctl_nrxds[8];
+	qidx_t ifc_sysctl_ntxds[8];
+	qidx_t ifc_sysctl_nrxds[8];
 	struct if_txrx ifc_txrx;
 #define isc_txd_encap ifc_txrx.ift_txd_encap
 #define isc_txd_flush ifc_txrx.ift_txd_flush
@@ -252,7 +259,9 @@ iflib_get_sctx(if_ctx_t ctx)
 	return (ctx->ifc_sctx);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***