From: Hans Petter Selasky <hselasky@FreeBSD.org>
Date: Mon, 24 Feb 2020 09:38:47 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r358271 - stable/12/sys/net
Message-Id: <202002240938.01O9clNV073062@repo.freebsd.org>

Author: hselasky
Date: Mon Feb 24 09:38:47 2020
New Revision: 358271
URL: https://svnweb.freebsd.org/changeset/base/358271

Log:
  MFC r357799:
  Make sure the so-called end of receive interrupts don't starve in iflib.

  When the receive ring cannot be filled with mbufs, due to lack of memory,
  no more interrupts may be generated to fill the receive ring later on.

  Make sure to have a watchdog, to try refilling the receive ring from time
  to time, hopefully when more mbufs are available.
  Differential Revision:	https://reviews.freebsd.org/D23315
  Reviewed by:	gallatin@
  Sponsored by:	Mellanox Technologies

Modified:
  stable/12/sys/net/iflib.c

Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/net/iflib.c
==============================================================================
--- stable/12/sys/net/iflib.c	Mon Feb 24 09:31:30 2020	(r358270)
+++ stable/12/sys/net/iflib.c	Mon Feb 24 09:38:47 2020	(r358271)
@@ -131,6 +131,9 @@ __FBSDID("$FreeBSD$");
  */
 MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
 
+#define	IFLIB_RXEOF_MORE (1U << 0)
+#define	IFLIB_RXEOF_EMPTY (2U << 0)
+
 struct iflib_txq;
 typedef struct iflib_txq *iflib_txq_t;
 struct iflib_rxq;
@@ -435,6 +438,7 @@ struct iflib_rxq {
 	uint8_t		ifr_fl_offset;
 	struct lro_ctrl			ifr_lc;
 	struct grouptask        ifr_task;
+	struct callout	ifr_watchdog;
 	struct iflib_filter_info ifr_filter_info;
 	iflib_dma_info_t		ifr_ifdi;
 
@@ -1979,7 +1983,7 @@ _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int
  * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
  * The caller must assure that @count does not exceed the queue's capacity.
  */
-static void
+static uint8_t
 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
 {
 	struct if_rxd_update iru;
@@ -2107,9 +2111,11 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int coun
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
 	fl->ifl_fragidx = frag_idx;
+
+	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
 }
 
-static __inline void
+static __inline uint8_t
 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
 {
 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
@@ -2122,7 +2128,8 @@ __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int
 	MPASS(reclaimable == delta);
 
 	if (reclaimable > 0)
-		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
+		return (_iflib_fl_refill(ctx, fl, min(max, reclaimable)));
+	return (0);
 }
 
 uint8_t
@@ -2210,7 +2217,7 @@ iflib_fl_setup(iflib_fl_t fl)
 	/* avoid pre-allocating zillions of clusters to an idle card
 	 * potentially speeding up attach
 	 */
-	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
+	(void) _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
 	if (min(128, fl->ifl_size) != fl->ifl_credits)
 		return (ENOBUFS);
@@ -2720,7 +2727,15 @@ iflib_get_ip_forwarding(struct lro_ctrl *lc __unused,
 }
 #endif
 
-static bool
+static void
+_task_fn_rx_watchdog(void *context)
+{
+	iflib_rxq_t rxq = context;
+
+	GROUPTASK_ENQUEUE(&rxq->ifr_task);
+}
+
+static uint8_t
 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
 {
 	if_t ifp;
@@ -2734,6 +2749,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
 	iflib_fl_t fl;
 	int lro_enabled;
 	bool v4_forwarding, v6_forwarding, lro_possible;
+	uint8_t retval = 0;
 
 	/*
 	 * XXX early demux data packets so that if_input processing only handles
@@ -2752,9 +2768,9 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
-			__iflib_fl_refill_lt(ctx, fl, budget + 8);
+			retval |= __iflib_fl_refill_lt(ctx, fl, budget + 8);
 		DBG_COUNTER_INC(rx_unavail);
-		return (false);
+		return (retval);
 	}
 
 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
@@ -2808,7 +2824,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
 	}
 	/* make sure that we can refill faster than drain */
 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
-		__iflib_fl_refill_lt(ctx, fl, budget + 8);
+		retval |= __iflib_fl_refill_lt(ctx, fl, budget + 8);
 
 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
 	if (lro_enabled)
@@ -2867,15 +2883,15 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
 #if defined(INET6) || defined(INET)
 	tcp_lro_flush_all(&rxq->ifr_lc);
 #endif
-	if (avail)
-		return true;
-	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
+	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
+		retval |= IFLIB_RXEOF_MORE;
+	return (retval);
 err:
 	STATE_LOCK(ctx);
 	ctx->ifc_flags |= IFC_DO_RESET;
 	iflib_admin_intr_deferred(ctx);
 	STATE_UNLOCK(ctx);
-	return (false);
+	return (0);
 }
 
 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
@@ -3761,7 +3777,7 @@ _task_fn_rx(void *context)
 {
 	iflib_rxq_t rxq = context;
 	if_ctx_t ctx = rxq->ifr_ctx;
-	bool more;
+	uint8_t more;
 	uint16_t budget;
 
 #ifdef IFLIB_DIAGNOSTICS
@@ -3770,19 +3786,23 @@ _task_fn_rx(void *context)
 	DBG_COUNTER_INC(task_fn_rxs);
 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
 		return;
-	more = true;
 #ifdef DEV_NETMAP
 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
 		u_int work = 0;
 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
-			more = false;
+			more = 0;
+			goto skip_rxeof;
 		}
 	}
 #endif
 	budget = ctx->ifc_sysctl_rx_budget;
 	if (budget == 0)
 		budget = 16;	/* XXX */
-	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
+	more = iflib_rxeof(rxq, budget);
+#ifdef DEV_NETMAP
+skip_rxeof:
+#endif
+	if ((more & IFLIB_RXEOF_MORE) == 0) {
 		if (ctx->ifc_flags & IFC_LEGACY)
 			IFDI_INTR_ENABLE(ctx);
 		else
@@ -3791,8 +3811,11 @@ _task_fn_rx(void *context)
 	}
 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
 		return;
-	if (more)
+
+	if (more & IFLIB_RXEOF_MORE)
 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
+	else if (more & IFLIB_RXEOF_EMPTY)
+		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
 }
 
 static void
@@ -4973,6 +4996,7 @@ iflib_pseudo_deregister(if_ctx_t ctx)
 		taskqgroup_detach(tqg, &txq->ift_task);
 	}
 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
+		callout_drain(&rxq->ifr_watchdog);
 		if (rxq->ifr_task.gt_uniq != NULL)
 			taskqgroup_detach(tqg, &rxq->ifr_task);
 
@@ -5472,6 +5496,7 @@ iflib_queues_alloc(if_ctx_t ctx)
 
 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
 		/* Set up some basics */
+		callout_init(&rxq->ifr_watchdog, 1);
 
 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
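
For readers less familiar with the pattern, the following is a minimal userland
sketch (not iflib code) of the control flow this change introduces: the receive
path now returns a bit mask instead of a bool, and when the MORE bit is clear
but the EMPTY bit is set, a one-tick retry is armed instead of waiting for an
interrupt that may never arrive.  The names refill_sim(), rxeof_sim() and the
RXEOF_* constants are invented for illustration only; the real driver uses the
callout(9) watchdog and GROUPTASK_ENQUEUE() shown in the diff above.

/*
 * Hypothetical userland analogue of the retry logic in r358271; not iflib
 * code.  refill_sim() stands in for _iflib_fl_refill() and reports EMPTY
 * while no "buffers" are available; the main loop stands in for
 * _task_fn_rx() and, instead of a callout(9) watchdog, simply sleeps one
 * "tick" before retrying.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define	RXEOF_MORE	(1U << 0)	/* descriptors still pending */
#define	RXEOF_EMPTY	(1U << 1)	/* free list could not be refilled */

/* Fails (returns RXEOF_EMPTY) until buffers become available. */
static uint8_t
refill_sim(int *free_bufs)
{

	if (*free_bufs == 0)
		return (RXEOF_EMPTY);
	(*free_bufs)--;
	return (0);
}

static uint8_t
rxeof_sim(int *free_bufs, bool more_work)
{
	uint8_t retval;

	retval = refill_sim(free_bufs);
	if (more_work)
		retval |= RXEOF_MORE;
	return (retval);
}

int
main(void)
{
	int free_bufs = 0;	/* start out of buffer memory */
	int tick = 0;
	uint8_t more;

	for (;;) {
		more = rxeof_sim(&free_bufs, false);
		if (more & RXEOF_MORE)
			continue;	/* real code: GROUPTASK_ENQUEUE() */
		if ((more & RXEOF_EMPTY) == 0)
			break;		/* ring refilled; wait for interrupts */
		/* real code: callout_reset_curcpu(&ifr_watchdog, 1, ...) */
		sleep(1);
		if (++tick == 3)
			free_bufs = 4;	/* memory became available */
	}
	printf("refill succeeded after %d watchdog ticks\n", tick);
	return (0);
}

The design point is the same as in the patch: an empty free list must not be
treated as "no more work", or the queue stalls once mbufs run out; the
one-tick retry guarantees the refill is attempted again even while receive
interrupts are quiescent.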