Date:      Thu, 25 May 2017 00:43:56 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r318842 - stable/11/sys/dev/cxgbe
Message-ID:  <201705250043.v4P0huqS011913@repo.freebsd.org>

Author: np
Date: Thu May 25 00:43:56 2017
New Revision: 318842
URL: https://svnweb.freebsd.org/changeset/base/318842

Log:
  MFC r317041:
  
  cxgbe: Add tunables to control the number of LRO entries and the number
  of rx mbufs that should be presorted before LRO.  There is no change in
  default behavior.
  
  Sponsored by:	Chelsio Communications
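
  For example, both knobs can be set from loader.conf(5) so that they
  take effect when the driver attaches.  The values below are
  illustrative only, not recommendations from this commit (the
  defaults, TCP_LRO_ENTRIES entries and presorting disabled, are
  unchanged):

  	hw.cxgbe.lro_entries="16"
  	hw.cxgbe.lro_mbufs="64"

  Leaving hw.cxgbe.lro_mbufs at 0 keeps the existing path where each
  frame is handed straight to tcp_lro_rx().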

Modified:
  stable/11/sys/dev/cxgbe/adapter.h
  stable/11/sys/dev/cxgbe/t4_sge.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/11/sys/dev/cxgbe/adapter.h	Thu May 25 00:34:01 2017	(r318841)
+++ stable/11/sys/dev/cxgbe/adapter.h	Thu May 25 00:43:56 2017	(r318842)
@@ -322,6 +322,7 @@ enum {
 	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
 	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
 	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */
+	IQ_ADJ_CREDIT	= (1 << 4),	/* hw is off by 1 credit for this iq */
 
 	/* iq state */
 	IQS_DISABLED	= 0,

Modified: stable/11/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_sge.c	Thu May 25 00:34:01 2017	(r318841)
+++ stable/11/sys/dev/cxgbe/t4_sge.c	Thu May 25 00:43:56 2017	(r318842)
@@ -157,6 +157,18 @@ TUNABLE_INT("hw.cxgbe.safest_rx_cluster"
 static int tscale = 1;
 TUNABLE_INT("hw.cxgbe.tscale", &tscale);
 
+/*
+ * Number of LRO entries in the lro_ctrl structure per rx queue.
+ */
+static int lro_entries = TCP_LRO_ENTRIES;
+TUNABLE_INT("hw.cxgbe.lro_entries", &lro_entries);
+
+/*
+ * This enables presorting of frames before they're fed into tcp_lro_rx.
+ */
+static int lro_mbufs = 0;
+TUNABLE_INT("hw.cxgbe.lro_mbufs", &lro_mbufs);
+
 struct txpkts {
 	u_int wr_type;		/* type 0 or type 1 */
 	u_int npkt;		/* # of packets in this work request */
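
Because these are TUNABLE_INT knobs they are read from the kernel
environment at driver initialization, so when cxgbe is loaded as a
module they can also be set with kenv(1) just before kldload (value
illustrative; this does not affect an already-attached driver):

	# kenv hw.cxgbe.lro_mbufs=64
	# kldload if_cxgbe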
@@ -1371,6 +1383,13 @@ t4_vi_intr(void *arg)
 		t4_intr(irq->rxq);
 }
 
+static inline int
+sort_before_lro(struct lro_ctrl *lro)
+{
+
+	return (lro->lro_mbuf_max != 0);
+}
+
 /*
  * Deals with anything and everything on the given ingress queue.
  */
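
sort_before_lro() keys off lro_mbuf_max, which is nonzero only when the
lro_ctrl was initialized with a nonzero mbuf count, so a given queue
never mixes the presort path with the classic per-frame path.  For
context only (these declarations live in sys/netinet/tcp_lro.h, not in
this diff; the parameter names here are descriptive), the stack-side
entry points involved are roughly:

	int  tcp_lro_init_args(struct lro_ctrl *, struct ifnet *,
	         unsigned lro_entries, unsigned lro_mbufs);
	void tcp_lro_queue_mbuf(struct lro_ctrl *, struct mbuf *);
	void tcp_lro_flush_all(struct lro_ctrl *);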
@@ -1390,6 +1409,7 @@ service_iq(struct sge_iq *iq, int budget
 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
 #if defined(INET) || defined(INET6)
 	const struct timeval lro_timeout = {0, sc->lro_timeout};
+	struct lro_ctrl *lro = &rxq->lro;
 #endif
 
 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
@@ -1404,6 +1424,23 @@ service_iq(struct sge_iq *iq, int budget
 		fl_hw_cidx = 0;			/* to silence gcc warning */
 	}
 
+#if defined(INET) || defined(INET6)
+	if (iq->flags & IQ_ADJ_CREDIT) {
+		MPASS(sort_before_lro(lro));
+		iq->flags &= ~IQ_ADJ_CREDIT;
+		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
+			tcp_lro_flush_all(lro);
+			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
+			    V_INGRESSQID((u32)iq->cntxt_id) |
+			    V_SEINTARM(iq->intr_params));
+			return (0);
+		}
+		ndescs = 1;
+	}
+#else
+	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
+#endif
+
 	/*
 	 * We always come back and check the descriptor ring for new indirect
 	 * interrupts and other responses after running a single handler.
@@ -1515,8 +1552,9 @@ service_iq(struct sge_iq *iq, int budget
 
 #if defined(INET) || defined(INET6)
 				if (iq->flags & IQ_LRO_ENABLED &&
+				    !sort_before_lro(lro) &&
 				    sc->lro_timeout != 0) {
-					tcp_lro_flush_inactive(&rxq->lro,
+					tcp_lro_flush_inactive(lro,
 					    &lro_timeout);
 				}
 #endif
@@ -1556,9 +1594,14 @@ process_iql:
 
 #if defined(INET) || defined(INET6)
 	if (iq->flags & IQ_LRO_ENABLED) {
-		struct lro_ctrl *lro = &rxq->lro;
-
-		tcp_lro_flush_all(lro);
+		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
+			MPASS(sort_before_lro(lro));
+			/* hold back one credit and don't flush LRO state */
+			iq->flags |= IQ_ADJ_CREDIT;
+			ndescs--;
+		} else {
+			tcp_lro_flush_all(lro);
+		}
 	}
 #endif
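
This is the other half of the IQ_ADJ_CREDIT dance set up at the top of
service_iq(): if more than 8 mbufs are still queued for sorting when
the loop ends, one descriptor's credit is withheld so the queue gets
revisited; on that revisit, if the generation bit shows no new
descriptors, the queued LRO state is flushed and the single held-back
credit is returned through the GTS doorbell, otherwise processing
resumes with ndescs preloaded to 1 to cover the already-consumed
descriptor.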
 
@@ -1847,10 +1890,14 @@ t4_eth_rx(struct sge_iq *iq, const struc
 	}
 
 #if defined(INET) || defined(INET6)
-	if (iq->flags & IQ_LRO_ENABLED &&
-	    tcp_lro_rx(lro, m0, 0) == 0) {
-		/* queued for LRO */
-	} else
+	if (iq->flags & IQ_LRO_ENABLED) {
+		if (sort_before_lro(lro)) {
+			tcp_lro_queue_mbuf(lro, m0);
+			return (0); /* queued for sort, then LRO */
+		}
+		if (tcp_lro_rx(lro, m0, 0) == 0)
+			return (0); /* queued for LRO */
+	}
 #endif
 	ifp->if_input(ifp, m0);
 
@@ -3041,10 +3088,10 @@ alloc_rxq(struct vi_info *vi, struct sge
 	FL_UNLOCK(&rxq->fl);
 
 #if defined(INET) || defined(INET6)
-	rc = tcp_lro_init(&rxq->lro);
+	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
 	if (rc != 0)
 		return (rc);
-	rxq->lro.ifp = vi->ifp; /* also indicates LRO init'ed */
+	MPASS(rxq->lro.ifp == vi->ifp);	/* also indicates LRO init'ed */
 
 	if (vi->ifp->if_capenable & IFCAP_LRO)
 		rxq->iq.flags |= IQ_LRO_ENABLED;
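
A driver-side sketch of the new init/teardown pairing (abbreviated and
not verbatim from this file; tcp_lro_init_args() returns ENOMEM when
the presort mbuf array cannot be allocated, and tcp_lro_free() releases
it on queue teardown):

	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
	if (rc != 0)
		return (rc);
	/* ... */
	tcp_lro_free(&rxq->lro);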


