Date:      Sun, 2 Feb 2014 05:15:36 +0000 (UTC)
From:      Bryan Venteicher <bryanv@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r261394 - head/sys/dev/virtio/network
Message-ID:  <201402020515.s125Fbxn061750@svn.freebsd.org>

Author: bryanv
Date: Sun Feb  2 05:15:36 2014
New Revision: 261394
URL: http://svnweb.freebsd.org/changeset/base/261394

Log:
  Do not place the sglist used for Rx/Tx on the stack
  
  The sglist segment array has grown to a bit over 512 bytes (on a
  64-bit system), which is more than should ideally be placed on the
  stack. Instead, allocate an appropriately sized sglist and hang it
  off each Rx/Tx queue structure.
  
  Bump the maximum number of Tx segments to 64 to make it unlikely
  we'll have to defragment an mbuf chain. Our previous count was
  already rounded up to this value, since 64 is the next power of
  two, so effective memory usage should not change.
  
  Also, only allocate the maximum number of Tx segments when TSO has
  been negotiated.
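  
  For readers unfamiliar with the sglist(9) KPI, here is a minimal
  sketch of the lifecycle this change adopts: allocate the
  scatter/gather list once when a queue is created, reset and reuse
  it on every enqueue, and free it with the queue. Before this
  change, each enqueue built the list on the stack, where 34 segments
  at 16 bytes apiece (a vm_paddr_t plus a size_t on 64-bit) already
  account for roughly 544 bytes. The vtnrx_sg field name matches the
  diff below; the abbreviated structure and helper functions are
  illustrative only, not the driver's actual code.
  
  	#include <sys/param.h>
  	#include <sys/systm.h>
  	#include <sys/errno.h>
  	#include <sys/malloc.h>
  	#include <sys/mbuf.h>
  	#include <sys/sglist.h>
  
  	/* Abbreviated queue; the real vtnet_rxq has many more fields. */
  	struct rxq_sketch {
  		struct sglist	*vtnrx_sg;	/* preallocated at init */
  	};
  
  	/* Init: allocate the segment array once, off the stack. */
  	static int
  	rxq_init_sketch(struct rxq_sketch *rxq, int nsegs)
  	{
  		rxq->vtnrx_sg = sglist_alloc(nsegs, M_NOWAIT);
  		if (rxq->vtnrx_sg == NULL)
  			return (ENOMEM);
  		return (0);
  	}
  
  	/* Enqueue: sglist_reset() replaces the old per-call
  	 * sglist_init() on a stack-backed segment array. */
  	static int
  	rxq_enqueue_sketch(struct rxq_sketch *rxq, struct mbuf *m)
  	{
  		struct sglist *sg = rxq->vtnrx_sg;
  
  		sglist_reset(sg);
  		return (sglist_append_mbuf(sg, m));
  	}
  
  	/* Teardown: release the list along with the queue. */
  	static void
  	rxq_destroy_sketch(struct rxq_sketch *rxq)
  	{
  		if (rxq->vtnrx_sg != NULL) {
  			sglist_free(rxq->vtnrx_sg);
  			rxq->vtnrx_sg = NULL;
  		}
  	}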

Modified:
  head/sys/dev/virtio/network/if_vtnet.c
  head/sys/dev/virtio/network/if_vtnetvar.h

Modified: head/sys/dev/virtio/network/if_vtnet.c
==============================================================================
--- head/sys/dev/virtio/network/if_vtnet.c	Sun Feb  2 00:48:15 2014	(r261393)
+++ head/sys/dev/virtio/network/if_vtnet.c	Sun Feb  2 05:15:36 2014	(r261394)
@@ -609,6 +609,20 @@ vtnet_setup_features(struct vtnet_softc 
 	} else
 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
 
+	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
+	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
+	else
+		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
+	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
+	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
+		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
+	else
+		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
+
 	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
 		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
 
@@ -664,6 +678,10 @@ vtnet_init_rxq(struct vtnet_softc *sc, i
 	rxq->vtnrx_sc = sc;
 	rxq->vtnrx_id = id;
 
+	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
+	if (rxq->vtnrx_sg == NULL)
+		return (ENOMEM);
+
 	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
@@ -685,6 +703,10 @@ vtnet_init_txq(struct vtnet_softc *sc, i
 	txq->vtntx_sc = sc;
 	txq->vtntx_id = id;
 
+	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
+	if (txq->vtntx_sg == NULL)
+		return (ENOMEM);
+
 #ifndef VTNET_LEGACY_TX
 	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
 	    M_NOWAIT, &txq->vtntx_mtx);
@@ -737,6 +759,11 @@ vtnet_destroy_rxq(struct vtnet_rxq *rxq)
 	rxq->vtnrx_sc = NULL;
 	rxq->vtnrx_id = -1;
 
+	if (rxq->vtnrx_sg != NULL) {
+		sglist_free(rxq->vtnrx_sg);
+		rxq->vtnrx_sg = NULL;
+	}
+
 	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
 		mtx_destroy(&rxq->vtnrx_mtx);
 }
@@ -748,6 +775,11 @@ vtnet_destroy_txq(struct vtnet_txq *txq)
 	txq->vtntx_sc = NULL;
 	txq->vtntx_id = -1;
 
+	if (txq->vtntx_sg != NULL) {
+		sglist_free(txq->vtntx_sg);
+		txq->vtntx_sg = NULL;
+	}
+
 #ifndef VTNET_LEGACY_TX
 	if (txq->vtntx_br != NULL) {
 		buf_ring_free(txq->vtntx_br, M_DEVBUF);
@@ -822,24 +854,11 @@ vtnet_alloc_virtqueues(struct vtnet_soft
 	struct vq_alloc_info *info;
 	struct vtnet_rxq *rxq;
 	struct vtnet_txq *txq;
-	int i, idx, flags, nvqs, rxsegs, error;
+	int i, idx, flags, nvqs, error;
 
 	dev = sc->vtnet_dev;
 	flags = 0;
 
-	/*
-	 * Indirect descriptors are not needed for the Rx virtqueue when
-	 * mergeable buffers are negotiated. The header is placed inline
-	 * with the data, not in a separate descriptor, and mbuf clusters
-	 * are always physically contiguous.
-	 */
-	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
-		rxsegs = 0;
-	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
-		rxsegs = VTNET_MAX_RX_SEGS;
-	else
-		rxsegs = VTNET_MIN_RX_SEGS;
-
 	nvqs = sc->vtnet_max_vq_pairs * 2;
 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
 		nvqs++;
@@ -850,12 +869,12 @@ vtnet_alloc_virtqueues(struct vtnet_soft
 
 	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
 		rxq = &sc->vtnet_rxqs[i];
-		VQ_ALLOC_INFO_INIT(&info[idx], rxsegs,
+		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
 		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
 		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
 
 		txq = &sc->vtnet_txqs[i];
-		VQ_ALLOC_INFO_INIT(&info[idx+1], VTNET_MAX_TX_SEGS,
+		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
 		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
 		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
 	}
@@ -1362,14 +1381,14 @@ vtnet_rxq_replace_buf(struct vtnet_rxq *
 static int
 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
 {
-	struct sglist sg;
-	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
 	struct vtnet_softc *sc;
+	struct sglist *sg;
 	struct vtnet_rx_header *rxhdr;
 	uint8_t *mdata;
 	int offset, error;
 
 	sc = rxq->vtnrx_sc;
+	sg = rxq->vtnrx_sg;
 	mdata = mtod(m, uint8_t *);
 
 	VTNET_RXQ_LOCK_ASSERT(rxq);
@@ -1379,22 +1398,22 @@ vtnet_rxq_enqueue_buf(struct vtnet_rxq *
 	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
 	     sc->vtnet_rx_clsize));
 
-	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
+	sglist_reset(sg);
 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
 		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
 		rxhdr = (struct vtnet_rx_header *) mdata;
-		sglist_append(&sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
+		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
 		offset = sizeof(struct vtnet_rx_header);
 	} else
 		offset = 0;
 
-	sglist_append(&sg, mdata + offset, m->m_len - offset);
+	sglist_append(sg, mdata + offset, m->m_len - offset);
 	if (m->m_next != NULL) {
-		error = sglist_append_mbuf(&sg, m->m_next);
+		error = sglist_append_mbuf(sg, m->m_next);
 		MPASS(error == 0);
 	}
 
-	error = virtqueue_enqueue(rxq->vtnrx_vq, m, &sg, 0, sg.sg_nseg);
+	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
 
 	return (error);
 }
@@ -2048,30 +2067,30 @@ static int
 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
     struct vtnet_tx_header *txhdr)
 {
-	struct sglist sg;
-	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
 	struct vtnet_softc *sc;
 	struct virtqueue *vq;
+	struct sglist *sg;
 	struct mbuf *m;
 	int collapsed, error;
 
-	vq = txq->vtntx_vq;
 	sc = txq->vtntx_sc;
+	vq = txq->vtntx_vq;
+	sg = txq->vtntx_sg;
 	m = *m_head;
 	collapsed = 0;
 
-	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
-	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
-	KASSERT(error == 0 && sg.sg_nseg == 1,
+	sglist_reset(sg);
+	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
+	KASSERT(error == 0 && sg->sg_nseg == 1,
 	    ("%s: error %d adding header to sglist", __func__, error));
 
 again:
-	error = sglist_append_mbuf(&sg, m);
+	error = sglist_append_mbuf(sg, m);
 	if (error) {
 		if (collapsed)
 			goto fail;
 
-		m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
+		m = m_collapse(m, M_NOWAIT, sc->vtnet_tx_nsegs - 1);
 		if (m == NULL)
 			goto fail;
 
@@ -2082,7 +2101,7 @@ again:
 	}
 
 	txhdr->vth_mbuf = m;
-	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
+	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
 
 	return (error);
 
@@ -2849,9 +2868,10 @@ vtnet_init_rx_queues(struct vtnet_softc 
 	sc->vtnet_rx_clsize = clsize;
 	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
 
-	/* The first segment is reserved for the header. */
-	KASSERT(sc->vtnet_rx_nmbufs < VTNET_MAX_RX_SEGS,
-	    ("%s: too many rx mbufs %d", __func__, sc->vtnet_rx_nmbufs));
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
+	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
+	    ("%s: too many rx mbufs %d for %d segments", __func__,
+	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
 
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
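
A note on the virtqueue_enqueue() calls in the hunks above: the last
two arguments give the number of readable and writable segments in
the sglist. Receive buffers are written by the device, so the driver
passes (0, sg->sg_nseg); transmit buffers are read by the device, so
it passes (sg->sg_nseg, 0). Both call shapes, excerpted from the diff
with explanatory comments added:

	/* Rx: the device writes into the buffers, all segments writable. */
	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);

	/* Tx: the device reads the buffers, all segments readable. */
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);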

Modified: head/sys/dev/virtio/network/if_vtnetvar.h
==============================================================================
--- head/sys/dev/virtio/network/if_vtnetvar.h	Sun Feb  2 00:48:15 2014	(r261393)
+++ head/sys/dev/virtio/network/if_vtnetvar.h	Sun Feb  2 05:15:36 2014	(r261394)
@@ -70,6 +70,7 @@ struct vtnet_rxq {
 	struct mtx		 vtnrx_mtx;
 	struct vtnet_softc	*vtnrx_sc;
 	struct virtqueue	*vtnrx_vq;
+	struct sglist		*vtnrx_sg;
 	int			 vtnrx_id;
 	int			 vtnrx_process_limit;
 	struct vtnet_rxq_stats	 vtnrx_stats;
@@ -99,6 +100,7 @@ struct vtnet_txq {
 	struct mtx		 vtntx_mtx;
 	struct vtnet_softc	*vtntx_sc;
 	struct virtqueue	*vtntx_vq;
+	struct sglist		*vtntx_sg;
 #ifndef VTNET_LEGACY_TX
 	struct buf_ring		*vtntx_br;
 #endif
@@ -143,9 +145,11 @@ struct vtnet_softc {
 	int			 vtnet_link_active;
 	int			 vtnet_hdr_size;
 	int			 vtnet_rx_process_limit;
+	int			 vtnet_rx_nsegs;
 	int			 vtnet_rx_nmbufs;
 	int			 vtnet_rx_clsize;
 	int			 vtnet_rx_new_clsize;
+	int			 vtnet_tx_nsegs;
 	int			 vtnet_if_flags;
 	int			 vtnet_act_vq_pairs;
 	int			 vtnet_max_vq_pairs;
@@ -293,11 +297,14 @@ CTASSERT(sizeof(struct vtnet_mac_filter)
 
 /*
  * Used to preallocate the Vq indirect descriptors. The first segment
- * is reserved for the header.
+ * is reserved for the header, except for mergeable buffers since the
+ * header is placed inline with the data.
  */
+#define VTNET_MRG_RX_SEGS	1
 #define VTNET_MIN_RX_SEGS	2
 #define VTNET_MAX_RX_SEGS	34
-#define VTNET_MAX_TX_SEGS	34
+#define VTNET_MIN_TX_SEGS	4
+#define VTNET_MAX_TX_SEGS	64
 
 /*
  * Assert we can receive and transmit the maximum with regular
@@ -314,7 +321,7 @@ CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLB
 
 /*
  * Determine how many mbufs are in each receive buffer. For LRO without
- * mergeable descriptors, we must allocate an mbuf chain large enough to
+ * mergeable buffers, we must allocate an mbuf chain large enough to
  * hold both the vtnet_rx_header and the maximum receivable data.
  */
 #define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
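
For context, a self-contained sketch of the sizing arithmetic behind
the segment counts above (constant values copied from the diff;
MCLBYTES is assumed to be the usual 2048-byte cluster size, and 65536
approximates the 64 KB maximum TSO/LRO payload). It mirrors the
driver's own CTASSERTs, visible in the hunk context above, which
perform the same check with the real constants:

	/* One segment is reserved for the header (except with mergeable
	 * buffers), so the remaining segments must cover the maximum
	 * payload in full clusters. */
	#define SKETCH_MCLBYTES		2048	/* assumed MCLBYTES */
	#define SKETCH_MAX_RX_SEGS	34	/* VTNET_MAX_RX_SEGS */
	#define SKETCH_MAX_TX_SEGS	64	/* VTNET_MAX_TX_SEGS */

	_Static_assert((SKETCH_MAX_RX_SEGS - 1) * SKETCH_MCLBYTES >= 65536,
	    "1 header segment + 33 clusters cover a 64 KB LRO packet");
	_Static_assert((SKETCH_MAX_TX_SEGS - 1) * SKETCH_MCLBYTES >= 65536,
	    "63 data segments comfortably cover a 64 KB TSO chain");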


