From: Mark Johnston <markj@FreeBSD.org>
Date: Tue, 4 Aug 2020 16:06:58 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r363845 - stable/12/sys/net
Message-Id: <202008041606.074G6wrr068478@repo.freebsd.org>

Author: markj
Date: Tue Aug  4 16:06:58 2020
New Revision: 363845
URL: https://svnweb.freebsd.org/changeset/base/363845

Log:
  MFC r362962, r362963:
  iflib: Fix handling of mbuf cluster allocation failures.
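The heart of the change, visible in the iflib_fl_refill() hunks below, is that the
M_NOWAIT cluster and mbuf-header allocations are now allowed to fail gracefully:
the refill loop breaks out on the first failure, frees the cluster if the DMA load
fails, and only flushes descriptors to the hardware for slots that were actually
populated (the new "if (n < count - 1)" block). The following standalone sketch is
not iflib code; struct ring, ring_refill() and alloc_cluster() are hypothetical
stand-ins, with malloc() in place of uma_zalloc(..., M_NOWAIT), but it illustrates
the same pattern of stopping at the first failure and publishing only what was
filled.

/*
 * Standalone sketch (not iflib code): stop refilling on the first
 * allocation failure and publish only the slots that were filled.
 * struct ring, alloc_cluster() and ring_refill() are hypothetical;
 * malloc() stands in for uma_zalloc(..., M_NOWAIT).
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE       8

struct slot {
        void    *cluster;       /* stand-in for an mbuf cluster */
};

struct ring {
        struct slot     slots[RING_SIZE];
        int             pidx;           /* producer index published so far */
        int             credits;        /* descriptors backed by buffers */
};

/* Allocator that can fail, like an M_NOWAIT allocation under memory pressure. */
static void *
alloc_cluster(int idx)
{
        if (idx == 5)           /* simulate a failure partway through */
                return (NULL);
        return (malloc(2048));
}

/*
 * Refill up to 'count' slots.  On failure, break out and advance
 * pidx/credits only past the slots that really received a buffer, so
 * the consumer never sees a descriptor with no cluster behind it.
 */
static int
ring_refill(struct ring *r, int count)
{
        int filled = 0;

        while (count-- > 0) {
                void *cl = alloc_cluster(r->pidx);

                if (cl == NULL)
                        break;          /* do not publish an empty slot */
                r->slots[r->pidx].cluster = cl;
                if (++r->pidx == RING_SIZE)
                        r->pidx = 0;
                r->credits++;
                filled++;
        }
        return (filled);
}

int
main(void)
{
        struct ring r = { .pidx = 0, .credits = 0 };
        int filled, j;

        filled = ring_refill(&r, RING_SIZE);
        printf("filled %d of %d slots, pidx now %d\n",
            filled, RING_SIZE, r.pidx);
        for (j = 0; j < filled; j++)    /* clean up the toy allocations */
                free(r.slots[j].cluster);
        return (0);
}

In iflib itself the equivalent roles are played by uma_zalloc()/m_gethdr() for the
allocations and isc_rxd_refill()/isc_rxd_flush() for publishing, as the diff shows.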
Modified:
  stable/12/sys/net/iflib.c
  stable/12/sys/net/iflib.h
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/net/iflib.c
==============================================================================
--- stable/12/sys/net/iflib.c	Tue Aug  4 15:48:12 2020	(r363844)
+++ stable/12/sys/net/iflib.c	Tue Aug  4 16:06:58 2020	(r363845)
@@ -207,8 +207,6 @@ struct iflib_ctx {
 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
-#define isc_rxd_refill ifc_txrx.ift_rxd_refill
-#define isc_rxd_refill ifc_txrx.ift_rxd_refill
 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
         eventhandler_tag ifc_vlan_attach_event;
         eventhandler_tag ifc_vlan_detach_event;
@@ -394,8 +392,7 @@ struct iflib_fl {
         bus_dma_tag_t   ifl_buf_tag;
         iflib_dma_info_t ifl_ifdi;
         uint64_t        ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
-        caddr_t         ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
-        qidx_t          ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
+        qidx_t          ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
 }  __aligned(CACHE_LINE_SIZE);

 static inline qidx_t
@@ -859,7 +856,6 @@ netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring
                 if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
                         return netmap_ring_reinit(kring);

-                fl->ifl_vm_addrs[tmp_pidx] = addr;
                 if (__predict_false(init)) {
                         netmap_load_map(na, fl->ifl_buf_tag,
                             map[nic_i], addr);
@@ -1333,7 +1329,6 @@ iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t
         fl = &rxq->ifr_fl[flid];
         iru->iru_paddrs = fl->ifl_bus_addrs;
-        iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
         iru->iru_idxs = fl->ifl_rxd_idxs;
         iru->iru_qsidx = rxq->ifr_id;
         iru->iru_buf_size = fl->ifl_buf_size;
@@ -1987,7 +1982,7 @@ _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int
 }

 /**
- * _iflib_fl_refill - refill an rxq free-buffer list
+ * iflib_fl_refill - refill an rxq free-buffer list
  * @ctx: the iflib context
  * @fl: the free list to refill
  * @count: the number of new buffers to allocate
@@ -1996,7 +1991,7 @@ _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int
  * The caller must assure that @count does not exceed the queue's capacity.
  */
 static uint8_t
-_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
+iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
 {
         struct if_rxd_update iru;
         struct rxq_refill_cb_arg cb_arg;
@@ -2033,12 +2028,13 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
         if (n > 8)
                 DBG_COUNTER_INC(fl_refills_large);
         iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
-        while (n--) {
+        while (n-- > 0) {
                 /*
                  * We allocate an uninitialized mbuf + cluster, mbuf is
                  * initialized after rx.
                  *
-                 * If the cluster is still set then we know a minimum sized packet was received
+                 * If the cluster is still set then we know a minimum sized
+                 * packet was received
                  */
                 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
                     &frag_idx);
@@ -2046,7 +2042,8 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
                         bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
                 MPASS(frag_idx >= 0);
                 if ((cl = sd_cl[frag_idx]) == NULL) {
-                        if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
+                        cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
+                        if (__predict_false(cl == NULL))
                                 break;

                         cb_arg.error = 0;
@@ -2054,16 +2051,12 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
                         err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
                             cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
                             BUS_DMA_NOWAIT);
-                        if (err != 0 || cb_arg.error) {
-                                /*
-                                 * !zone_pack ?
-                                 */
-                                if (fl->ifl_zone == zone_pack)
-                                        uma_zfree(fl->ifl_zone, cl);
+                        if (__predict_false(err != 0 || cb_arg.error)) {
+                                uma_zfree(fl->ifl_zone, cl);
                                 break;
                         }

-                        sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
+                        sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
                         sd_cl[frag_idx] = cl;
 #if MEMORY_LOGGING
                         fl->ifl_cl_enqueued++;
 #endif
@@ -2075,9 +2068,9 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
                             BUS_DMASYNC_PREREAD);

                 MPASS(sd_m[frag_idx] == NULL);
-                if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
+                m = m_gethdr(M_NOWAIT, MT_NOINIT);
+                if (__predict_false(m == NULL))
                         break;
-                }
                 sd_m[frag_idx] = m;
                 bit_set(fl->ifl_rx_bitmap, frag_idx);
 #if MEMORY_LOGGING
@@ -2087,50 +2080,58 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
                 DBG_COUNTER_INC(rx_allocs);
                 fl->ifl_rxd_idxs[i] = frag_idx;
                 fl->ifl_bus_addrs[i] = bus_addr;
-                fl->ifl_vm_addrs[i] = cl;
                 credits++;
                 i++;
                 MPASS(credits <= fl->ifl_size);
                 if (++idx == fl->ifl_size) {
+#ifdef INVARIANTS
                         fl->ifl_gen = 1;
+#endif
                         idx = 0;
                 }
                 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
                         iru.iru_pidx = pidx;
                         iru.iru_count = i;
                         ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
-                        i = 0;
-                        pidx = idx;
                         fl->ifl_pidx = idx;
                         fl->ifl_credits = credits;
+                        pidx = idx;
+                        i = 0;
                 }
         }

-        if (i) {
-                iru.iru_pidx = pidx;
-                iru.iru_count = i;
-                ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
-                fl->ifl_pidx = idx;
-                fl->ifl_credits = credits;
+        if (n < count - 1) {
+                if (i != 0) {
+                        iru.iru_pidx = pidx;
+                        iru.iru_count = i;
+                        ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
+                        fl->ifl_pidx = idx;
+                        fl->ifl_credits = credits;
+                }
+                DBG_COUNTER_INC(rxd_flush);
+                if (fl->ifl_pidx == 0)
+                        pidx = fl->ifl_size - 1;
+                else
+                        pidx = fl->ifl_pidx - 1;
+
+                bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
+                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+                ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
+                    fl->ifl_id, pidx);
+                if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
+                        fl->ifl_fragidx = frag_idx + 1;
+                        if (fl->ifl_fragidx == fl->ifl_size)
+                                fl->ifl_fragidx = 0;
+                } else {
+                        fl->ifl_fragidx = frag_idx;
+                }
         }
-        DBG_COUNTER_INC(rxd_flush);
-        if (fl->ifl_pidx == 0)
-                pidx = fl->ifl_size - 1;
-        else
-                pidx = fl->ifl_pidx - 1;
-        bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
-            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-        ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
-        fl->ifl_fragidx = frag_idx + 1;
-        if (fl->ifl_fragidx == fl->ifl_size)
-                fl->ifl_fragidx = 0;
-
         return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
 }

-static __inline uint8_t
-__iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
+static inline uint8_t
+iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
 {
         /* we avoid allowing pidx to catch up with cidx as it confuses ixl */
         int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
@@ -2142,7 +2143,7 @@ __iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
         MPASS(reclaimable == delta);

         if (reclaimable > 0)
-                return (_iflib_fl_refill(ctx, fl, reclaimable));
+                return (iflib_fl_refill(ctx, fl, reclaimable));

         return (0);
 }
@@ -2173,22 +2174,20 @@ iflib_fl_bufs_free(iflib_fl_t fl)
                         bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
                             BUS_DMASYNC_POSTREAD);
                         bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
-                        if (*sd_cl != NULL)
-                                uma_zfree(fl->ifl_zone, *sd_cl);
+                        uma_zfree(fl->ifl_zone, *sd_cl);
+                        *sd_cl = NULL;
                         if (*sd_m != NULL) {
                                 m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
                                 uma_zfree(zone_mbuf, *sd_m);
+                                *sd_m = NULL;
                         }
                 } else {
-                        MPASS(*sd_cl == NULL);
                         MPASS(*sd_m == NULL);
                 }
 #if MEMORY_LOGGING
                 fl->ifl_m_dequeued++;
                 fl->ifl_cl_dequeued++;
 #endif
-                *sd_cl = NULL;
-                *sd_m = NULL;
         }
 #ifdef INVARIANTS
         for (i = 0; i < fl->ifl_size; i++) {
@@ -2242,7 +2241,7 @@ iflib_fl_setup(iflib_fl_t fl)
         /* avoid pre-allocating zillions of clusters to an idle card
          * potentially speeding up attach
          */
-        (void) _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
+        (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
         MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
         if (min(128, fl->ifl_size) != fl->ifl_credits)
                 return (ENOBUFS);
@@ -2799,7 +2798,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
                 cidxp = &rxq->ifr_fl[0].ifl_cidx;
         if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
                 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
-                        retval |= __iflib_fl_refill_all(ctx, fl);
+                        retval |= iflib_fl_refill_all(ctx, fl);
                 DBG_COUNTER_INC(rx_unavail);
                 return (retval);
         }
@@ -2855,7 +2854,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
         }
         /* make sure that we can refill faster than drain */
         for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
-                retval |= __iflib_fl_refill_all(ctx, fl);
+                retval |= iflib_fl_refill_all(ctx, fl);

         lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
         if (lro_enabled)

Modified: stable/12/sys/net/iflib.h
==============================================================================
--- stable/12/sys/net/iflib.h	Tue Aug  4 15:48:12 2020	(r363844)
+++ stable/12/sys/net/iflib.h	Tue Aug  4 16:06:58 2020	(r363845)
@@ -95,7 +95,6 @@ typedef struct if_rxd_info {

 typedef struct if_rxd_update {
         uint64_t        *iru_paddrs;
-        caddr_t         *iru_vaddrs;
         qidx_t          *iru_idxs;
         qidx_t          iru_pidx;
         uint16_t        iru_qsidx;
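A side effect of the rework is that the refill update no longer carries virtual
addresses: iru_vaddrs is dropped from if_rxd_update in iflib.h, since drivers
program their RX descriptors from the bus addresses and indices alone. The sketch
below is illustrative only; struct rxd_update, struct nic_rx_desc and
my_rxd_refill() are hypothetical stand-ins rather than the real iflib types, but
they show the shape of a refill callback that consumes just paddrs and idxs.

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down stand-in for if_rxd_update; not the real iflib.h layout. */
struct rxd_update {
        uint64_t *paddrs;       /* bus addresses of the newly loaded clusters */
        uint16_t *idxs;         /* descriptor indices being refilled */
        uint16_t count;
};

struct nic_rx_desc {
        uint64_t addr;          /* hardware consumes bus addresses only */
};

/* Hypothetical driver refill callback in the spirit of isc_rxd_refill. */
static void
my_rxd_refill(struct nic_rx_desc *ring, const struct rxd_update *iru)
{
        for (uint16_t i = 0; i < iru->count; i++)
                ring[iru->idxs[i]].addr = iru->paddrs[i];
}

int
main(void)
{
        struct nic_rx_desc ring[4] = {{ 0 }};
        uint64_t paddrs[2] = { 0x1000, 0x2000 };
        uint16_t idxs[2] = { 0, 3 };
        struct rxd_update iru = { .paddrs = paddrs, .idxs = idxs, .count = 2 };

        my_rxd_refill(ring, &iru);
        printf("desc 0 -> %#llx, desc 3 -> %#llx\n",
            (unsigned long long)ring[0].addr, (unsigned long long)ring[3].addr);
        return (0);
}

The diff removes the corresponding ifl_vm_addrs array from struct iflib_fl in
iflib.c as well, so iflib no longer tracks cluster virtual addresses on this path.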