Date:      Wed, 26 Oct 2016 18:20:06 +0000 (UTC)
From:      David C Somayajulu <davidcs@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject:   svn commit: r307974 - stable/9/sys/dev/bxe
Message-ID:  <201610261820.u9QIK6FJ052642@repo.freebsd.org>

Author: davidcs
Date: Wed Oct 26 18:20:06 2016
New Revision: 307974
URL: https://svnweb.freebsd.org/changeset/base/307974

Log:
  MFC r307578
      1. Use taskqueue_create() instead of taskqueue_create_fast() for both
         fastpath and slowpath taskqueues.
      2. Service all transmits in taskqueue threads (see the sketch below).
      3. Add stats counters to keep track of:
      	- BD availability
      	- the TX buf ring not being emptied in the fastpath task queue;
      	  such rings are drained via the timeout taskqueue.
      	- TX attempts while the link is down.
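
  For reference, the reworked if_transmit path in this change follows the
  standard drbr_peek()/drbr_advance()/drbr_putback() idiom (these helpers
  exist in stable/9 and are used in the diff below), and re-arms a timeout
  task when the buf ring could not be fully drained.  A minimal sketch of
  that idiom, illustrative only and not the bxe code itself; my_hw_enqueue()
  and the function name are hypothetical placeholders:

      /*
       * Sketch of draining a drbr buf ring while preserving frame order.
       * In a real driver this would include sys/param.h, sys/mbuf.h,
       * sys/buf_ring.h, net/if.h and net/if_var.h, and the caller would
       * hold the per-queue TX lock.
       */
      static int
      my_txq_drain(struct ifnet *ifp, struct buf_ring *br)
      {
          struct mbuf *next;
          int rc = 0;

          while ((next = drbr_peek(ifp, br)) != NULL) {
              if (my_hw_enqueue(next) != 0) {
                  /* HW ring full: leave the frame at the head so
                   * ordering is preserved; retry later from a task. */
                  drbr_putback(ifp, br, next);
                  rc = ENOBUFS;
                  break;
              }
              /* Frame handed to the hardware: consume it from the ring. */
              drbr_advance(ifp, br);
          }
          /* The driver re-schedules itself via taskqueue_enqueue_timeout()
           * if the ring is still non-empty at this point. */
          return (rc);
      }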

Modified:
  stable/9/sys/dev/bxe/bxe.c
  stable/9/sys/dev/bxe/bxe.h
  stable/9/sys/dev/bxe/bxe_stats.h
Directory Properties:
  stable/9/   (props changed)
  stable/9/sys/   (props changed)

Modified: stable/9/sys/dev/bxe/bxe.c
==============================================================================
--- stable/9/sys/dev/bxe/bxe.c	Wed Oct 26 18:13:30 2016	(r307973)
+++ stable/9/sys/dev/bxe/bxe.c	Wed Oct 26 18:20:06 2016	(r307974)
@@ -27,7 +27,7 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
-#define BXE_DRIVER_VERSION "1.78.81"
+#define BXE_DRIVER_VERSION "1.78.89"
 
 #include "bxe.h"
 #include "ecore_sp.h"
@@ -500,7 +500,14 @@ static const struct {
     { STATS_OFFSET32(mbuf_alloc_tpa),
                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
     { STATS_OFFSET32(tx_queue_full_return),
-                4, STATS_FLAGS_FUNC, "tx_queue_full_return"}
+                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
+    { STATS_OFFSET32(tx_request_link_down_failures),
+                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
+    { STATS_OFFSET32(bd_avail_too_less_failures),
+                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
+    { STATS_OFFSET32(tx_mq_not_empty),
+                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}
+
 };
 
 static const struct {
@@ -613,7 +620,14 @@ static const struct {
     { Q_STATS_OFFSET32(mbuf_alloc_tpa),
                 4, "mbuf_alloc_tpa"},
     { Q_STATS_OFFSET32(tx_queue_full_return),
-                4, "tx_queue_full_return"}
+                4, "tx_queue_full_return"},
+    { Q_STATS_OFFSET32(tx_request_link_down_failures),
+                4, "tx_request_link_down_failures"},
+    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
+                4, "bd_avail_too_less_failures"},
+    { Q_STATS_OFFSET32(tx_mq_not_empty),
+                4, "tx_mq_not_empty"}
+
 };
 
 #define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
@@ -5620,7 +5634,7 @@ bxe_tx_start(struct ifnet *ifp)
     BXE_FP_TX_UNLOCK(fp);
 }
 
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
 static int
 bxe_tx_mq_start_locked(struct bxe_softc    *sc,
@@ -5642,10 +5656,16 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         return (EINVAL);
     }
 
-    if (!sc->link_vars.link_up ||
-        (ifp->if_drv_flags &
-        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
+    if (m != NULL) {
         rc = drbr_enqueue(ifp, tx_br, m);
+        if (rc != 0) {
+            fp->eth_q_stats.tx_soft_errors++;
+            goto bxe_tx_mq_start_locked_exit;
+        }
+    }
+
+    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+        fp->eth_q_stats.tx_request_link_down_failures++;
         goto bxe_tx_mq_start_locked_exit;
     }
 
@@ -5655,24 +5675,22 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
     }
 
-    if (m == NULL) {
-        /* no new work, check for pending frames */
-        next = drbr_dequeue(ifp, tx_br);
-    } else if (drbr_needs_enqueue(ifp, tx_br)) {
-        /* have both new and pending work, maintain packet order */
-        rc = drbr_enqueue(ifp, tx_br, m);
-        if (rc != 0) {
-            fp->eth_q_stats.tx_soft_errors++;
-            goto bxe_tx_mq_start_locked_exit;
-        }
-        next = drbr_dequeue(ifp, tx_br);
-    } else {
-        /* new work only and nothing pending */
-        next = m;
-    }
-
     /* keep adding entries while there are frames to send */
-    while (next != NULL) {
+    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
+        /* handle any completions if we're running low */
+        tx_bd_avail = bxe_tx_avail(sc, fp);
+        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
+            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
+            bxe_txeof(sc, fp);
+            tx_bd_avail = bxe_tx_avail(sc, fp);
+            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
+                fp->eth_q_stats.bd_avail_too_less_failures++;
+                m_freem(next);
+                drbr_advance(ifp, tx_br);
+                rc = ENOBUFS;
+                break;
+            }
+        }
 
         /* the mbuf now belongs to us */
         fp->eth_q_stats.mbuf_alloc_tx++;
@@ -5688,11 +5706,11 @@ bxe_tx_mq_start_locked(struct bxe_softc 
             if (next != NULL) {
                 /* mark the TX queue as full and save the frame */
                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                /* XXX this may reorder the frame */
-                rc = drbr_enqueue(ifp, tx_br, next);
+                drbr_putback(ifp, tx_br, next);
                 fp->eth_q_stats.mbuf_alloc_tx--;
                 fp->eth_q_stats.tx_frames_deferred++;
-            }
+            } else
+                drbr_advance(ifp, tx_br);
 
             /* stop looking for more work */
             break;
@@ -5704,18 +5722,7 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         /* send a copy of the frame to any BPF listeners */
         BPF_MTAP(ifp, next);
 
-        tx_bd_avail = bxe_tx_avail(sc, fp);
-
-        /* handle any completions if we're running low */
-        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
-            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
-            bxe_txeof(sc, fp);
-            if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
-                break;
-            }
-        }
-
-        next = drbr_dequeue(ifp, tx_br);
+        drbr_advance(ifp, tx_br);
     }
 
     /* all TX packets were dequeued and/or the tx ring is full */
@@ -5725,10 +5732,28 @@ bxe_tx_mq_start_locked(struct bxe_softc 
     }
 
 bxe_tx_mq_start_locked_exit:
+    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
+    if (!drbr_empty(ifp, tx_br)) {
+        fp->eth_q_stats.tx_mq_not_empty++;
+        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
+    }
 
     return (rc);
 }
 
+static void
+bxe_tx_mq_start_deferred(void *arg,
+                         int pending)
+{
+    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
+    struct bxe_softc *sc = fp->sc;
+    struct ifnet *ifp = sc->ifnet;
+
+    BXE_FP_TX_LOCK(fp);
+    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
+    BXE_FP_TX_UNLOCK(fp);
+}
+
 /* Multiqueue (TSS) dispatch routine. */
 static int
 bxe_tx_mq_start(struct ifnet *ifp,
@@ -5750,8 +5775,10 @@ bxe_tx_mq_start(struct ifnet *ifp,
     if (BXE_FP_TX_TRYLOCK(fp)) {
         rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
         BXE_FP_TX_UNLOCK(fp);
-    } else
+    } else {
         rc = drbr_enqueue(ifp, fp->tx_br, m);
+        taskqueue_enqueue(fp->tq, &fp->tx_task);
+    }
 
     return (rc);
 }
@@ -5786,7 +5813,7 @@ bxe_mq_flush(struct ifnet *ifp)
     if_qflush(ifp);
 }
 
-#endif /* FreeBSD_version >= 800000 */
+#endif /* FreeBSD_version >= 901504 */
 
 static uint16_t
 bxe_cid_ilt_lines(struct bxe_softc *sc)
@@ -6146,7 +6173,7 @@ bxe_free_fp_buffers(struct bxe_softc *sc
     for (i = 0; i < sc->num_queues; i++) {
         fp = &sc->fp[i];
 
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
         if (fp->tx_br != NULL) {
             /* just in case bxe_mq_flush() wasn't called */
             if (mtx_initialized(&fp->tx_mtx)) {
@@ -6973,6 +7000,8 @@ bxe_link_attn(struct bxe_softc *sc)
     uint32_t pause_enabled = 0;
     struct host_port_stats *pstats;
     int cmng_fns;
+    struct bxe_fastpath *fp;
+    int i;
 
     /* Make sure that we are synced with the current statistics */
     bxe_stats_handle(sc, STATS_EVENT_STOP);
@@ -7004,6 +7033,12 @@ bxe_link_attn(struct bxe_softc *sc)
         if (sc->state == BXE_STATE_OPEN) {
             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
         }
+
+	/* Restart tx when the link comes back. */
+        FOR_EACH_ETH_QUEUE(sc, i) {
+            fp = &sc->fp[i];
+            taskqueue_enqueue(fp->tq, &fp->tx_task);
+	}
     }
 
     if (sc->link_vars.link_up && sc->link_vars.line_speed) {
@@ -9055,6 +9090,10 @@ bxe_interrupt_detach(struct bxe_softc *s
         fp = &sc->fp[i];
         if (fp->tq) {
             taskqueue_drain(fp->tq, &fp->tq_task);
+            taskqueue_drain(fp->tq, &fp->tx_task);
+            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
+                NULL))
+                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
             taskqueue_free(fp->tq);
             fp->tq = NULL;
         }
@@ -9087,9 +9126,9 @@ bxe_interrupt_attach(struct bxe_softc *s
     snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
              "bxe%d_sp_tq", sc->unit);
     TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
-    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
-                                      taskqueue_thread_enqueue,
-                                      &sc->sp_tq);
+    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
+                                 taskqueue_thread_enqueue,
+                                 &sc->sp_tq);
     taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
                             "%s", sc->sp_tq_name);
 
@@ -9099,9 +9138,12 @@ bxe_interrupt_attach(struct bxe_softc *s
         snprintf(fp->tq_name, sizeof(fp->tq_name),
                  "bxe%d_fp%d_tq", sc->unit, i);
         TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
-        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
-                                       taskqueue_thread_enqueue,
-                                       &fp->tq);
+        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
+        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
+                                  taskqueue_thread_enqueue,
+                                  &fp->tq);
+        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
+                          bxe_tx_mq_start_deferred, fp);
         taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
                                 "%s", fp->tq_name);
     }
@@ -12158,8 +12200,6 @@ static void
 bxe_periodic_callout_func(void *xsc)
 {
     struct bxe_softc *sc = (struct bxe_softc *)xsc;
-    struct bxe_fastpath *fp;
-    uint16_t tx_bd_avail;
     int i;
 
     if (!BXE_CORE_TRYLOCK(sc)) {
@@ -12182,47 +12222,6 @@ bxe_periodic_callout_func(void *xsc)
         return;
     }
 
-#if __FreeBSD_version >= 800000
-
-    FOR_EACH_QUEUE(sc, i) {
-        fp = &sc->fp[i];
-
-        if (BXE_FP_TX_TRYLOCK(fp)) {
-            struct ifnet *ifp = sc->ifnet;
-            /*
-             * If interface was stopped due to unavailable
-             * bds, try to process some tx completions
-             */
-            (void) bxe_txeof(sc, fp);
-           
-            tx_bd_avail = bxe_tx_avail(sc, fp);
-            if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
-                bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
-            }
-            BXE_FP_TX_UNLOCK(fp);
-        }
-    }
-
-#else
-
-    fp = &sc->fp[0];
-    if (BXE_FP_TX_TRYLOCK(fp)) {
-        struct ifnet *ifp = sc->ifnet;
-        /*
-         * If interface was stopped due to unavailable
-         * bds, try to process some tx completions
-         */
-        (void) bxe_txeof(sc, fp);
-           
-        tx_bd_avail = bxe_tx_avail(sc, fp);
-        if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
-            bxe_tx_start_locked(sc, ifp, fp);
-        }
- 
-        BXE_FP_TX_UNLOCK(fp);
-    }
-
-#endif /* #if __FreeBSD_version >= 800000 */
 
     /* Check for TX timeouts on any fastpath. */
     FOR_EACH_QUEUE(sc, i) {
@@ -12698,7 +12697,7 @@ bxe_init_ifnet(struct bxe_softc *sc)
     ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
     ifp->if_ioctl = bxe_ioctl;
     ifp->if_start = bxe_tx_start;
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
     ifp->if_transmit = bxe_tx_mq_start;
     ifp->if_qflush = bxe_mq_flush;
 #endif
@@ -15745,7 +15744,7 @@ bxe_add_sysctls(struct bxe_softc *sc)
 static int
 bxe_alloc_buf_rings(struct bxe_softc *sc)
 {
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
     int i;
     struct bxe_fastpath *fp;
@@ -15766,7 +15765,7 @@ bxe_alloc_buf_rings(struct bxe_softc *sc
 static void
 bxe_free_buf_rings(struct bxe_softc *sc)
 {
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
     int i;
     struct bxe_fastpath *fp;

Modified: stable/9/sys/dev/bxe/bxe.h
==============================================================================
--- stable/9/sys/dev/bxe/bxe.h	Wed Oct 26 18:13:30 2016	(r307973)
+++ stable/9/sys/dev/bxe/bxe.h	Wed Oct 26 18:20:06 2016	(r307974)
@@ -644,6 +644,9 @@ struct bxe_fastpath {
     struct taskqueue *tq;
     char             tq_name[32];
 
+    struct task tx_task;
+    struct timeout_task tx_timeout_task;
+
     /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */
     uint8_t cl_id;
 #define FP_CL_ID(fp) (fp->cl_id)
@@ -2300,7 +2303,8 @@ void bxe_dump_mbuf_data(struct bxe_softc
 extern int bxe_grc_dump(struct bxe_softc *sc);
 
 #if __FreeBSD_version >= 800000
-#if __FreeBSD_version >= 1000000
+#if (__FreeBSD_version >= 1001513 && __FreeBSD_version < 1100000) ||\
+    __FreeBSD_version >= 1100048
 #define BXE_SET_FLOWID(m) M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE)
 #define BXE_VALID_FLOWID(m) (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 #else

Modified: stable/9/sys/dev/bxe/bxe_stats.h
==============================================================================
--- stable/9/sys/dev/bxe/bxe_stats.h	Wed Oct 26 18:13:30 2016	(r307973)
+++ stable/9/sys/dev/bxe/bxe_stats.h	Wed Oct 26 18:20:06 2016	(r307974)
@@ -266,6 +266,10 @@ struct bxe_eth_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+    /* debug stats */
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
 };
 
 
@@ -372,6 +376,11 @@ struct bxe_eth_q_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+
+    /* debug stats */
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
 };
 
 struct bxe_eth_stats_old {


