Date:      Thu, 19 Feb 2015 01:19:43 +0000 (UTC)
From:      Gleb Smirnoff <glebius@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r278977 - in head/sys: dev/cxgb dev/cxgb/sys dev/cxgb/ulp/tom dev/xen/netfront sys
Message-ID:  <201502190119.t1J1JhSI025601@svn.freebsd.org>

Author: glebius
Date: Thu Feb 19 01:19:42 2015
New Revision: 278977
URL: https://svnweb.freebsd.org/changeset/base/278977

Log:
  Provide a set of inline functions to manage simple mbuf(9) queues, based
  on queue(3)'s STAILQ.  Utilize them in cxgb(4) and Xen, deleting the
  home-grown implementations.
  
  Sponsored by:	Netflix
  Sponsored by:	Nginx, Inc.
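
For reference, a minimal sketch of how a consumer of the new mbufq API might
look; the example_* names and the 512-packet limit are illustrative
assumptions, not taken from the commit:

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/mbuf.h>

static struct mbufq example_q;

static void
example_init(void)
{

	/* Bound the queue at 512 packets; INT_MAX yields an unbounded queue. */
	mbufq_init(&example_q, 512);
}

static int
example_enqueue(struct mbuf *m)
{

	/* mbufq_enqueue() returns ENOBUFS once mq_len has reached mq_maxlen. */
	return (mbufq_enqueue(&example_q, m));
}

static void
example_teardown(void)
{

	/* m_freem() every queued mbuf and leave the queue empty. */
	mbufq_drain(&example_q);
}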

Deleted:
  head/sys/dev/cxgb/sys/mbufq.h
  head/sys/dev/xen/netfront/mbufq.h
Modified:
  head/sys/dev/cxgb/cxgb_adapter.h
  head/sys/dev/cxgb/cxgb_sge.c
  head/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c
  head/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h
  head/sys/dev/xen/netfront/netfront.c
  head/sys/sys/mbuf.h

Modified: head/sys/dev/cxgb/cxgb_adapter.h
==============================================================================
--- head/sys/dev/cxgb/cxgb_adapter.h	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/dev/cxgb/cxgb_adapter.h	Thu Feb 19 01:19:42 2015	(r278977)
@@ -59,7 +59,6 @@ $FreeBSD$
 #include <dev/pci/pcivar.h>
 
 #include <cxgb_osdep.h>
-#include <sys/mbufq.h>
 
 struct adapter;
 struct sge_qset;
@@ -251,7 +250,7 @@ struct sge_txq {
 	bus_dma_tag_t	desc_tag;
 	bus_dmamap_t	desc_map;
 	bus_dma_tag_t   entry_tag;
-	struct mbuf_head sendq;
+	struct mbufq	sendq;
 
 	struct buf_ring *txq_mr;
 	struct ifaltq	*txq_ifq;

Modified: head/sys/dev/cxgb/cxgb_sge.c
==============================================================================
--- head/sys/dev/cxgb/cxgb_sge.c	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/dev/cxgb/cxgb_sge.c	Thu Feb 19 01:19:42 2015	(r278977)
@@ -1117,9 +1117,10 @@ init_qset_cntxt(struct sge_qset *qs, u_i
 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
 
-	mbufq_init(&qs->txq[TXQ_ETH].sendq);
-	mbufq_init(&qs->txq[TXQ_OFLD].sendq);
-	mbufq_init(&qs->txq[TXQ_CTRL].sendq);
+	/* XXX: a sane limit is needed instead of INT_MAX */
+	mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
+	mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
+	mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
 }
 
 
@@ -1820,8 +1821,8 @@ check_desc_avail(adapter_t *adap, struct
 	 * the control queue is only used for binding qsets which happens
 	 * at init time so we are guaranteed enough descriptors
 	 */
-	if (__predict_false(!mbufq_empty(&q->sendq))) {
-addq_exit:	mbufq_tail(&q->sendq, m);
+	if (__predict_false(mbufq_len(&q->sendq))) {
+addq_exit:	(void )mbufq_enqueue(&q->sendq, m);
 		return 1;
 	}
 	if (__predict_false(q->size - q->in_use < ndesc)) {
@@ -1936,7 +1937,7 @@ again:	reclaim_completed_tx_imm(q);
 		}
 		q->in_use++;
 	}
-	if (!mbufq_empty(&q->sendq)) {
+	if (mbufq_len(&q->sendq)) {
 		setbit(&qs->txq_stopped, TXQ_CTRL);
 
 		if (should_restart_tx(q) &&
@@ -2319,7 +2320,7 @@ restart_offloadq(void *data, int npendin
 	TXQ_LOCK(qs);
 again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
 
-	while ((m = mbufq_peek(&q->sendq)) != NULL) {
+	while ((m = mbufq_first(&q->sendq)) != NULL) {
 		unsigned int gen, pidx;
 		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 		unsigned int ndesc = G_HDR_NDESC(oh->flags);
@@ -2485,7 +2486,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int i
 			printf("error %d from alloc ring tx %i\n", ret, i);
 			goto err;
 		}
-		mbufq_init(&q->txq[i].sendq);
+		mbufq_init(&q->txq[i].sendq, INT_MAX);
 		q->txq[i].gen = 1;
 		q->txq[i].size = p->txq_size[i];
 	}
@@ -3521,7 +3522,7 @@ t3_add_configured_sysctls(adapter_t *sc)
 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
 			    "#tunneled packets dropped");
 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
-			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
+			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
 			    0, "#tunneled packets waiting to be sent");
 #if 0			
 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c	Thu Feb 19 01:19:42 2015	(r278977)
@@ -1088,7 +1088,7 @@ send_reset(struct toepcb *toep)
 	req->cmd = CPL_ABORT_SEND_RST;
 
 	if (tp->t_state == TCPS_SYN_SENT)
-		mbufq_tail(&toep->out_of_order_queue, m); /* defer */
+		(void )mbufq_enqueue(&toep->out_of_order_queue, m); /* defer */
 	else
 		l2t_send(sc, m, toep->tp_l2t);
 }

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h	Thu Feb 19 01:19:42 2015	(r278977)
@@ -30,7 +30,7 @@
 #define CXGB_TOEPCB_H_
 #include <sys/bus.h>
 #include <sys/condvar.h>
-#include <sys/mbufq.h>
+#include <sys/limits.h>
 
 #define TP_DATASENT         	(1 << 0)
 #define TP_TX_WAIT_IDLE      	(1 << 1)
@@ -64,26 +64,26 @@ struct toepcb {
 	struct inpcb 		*tp_inp;
 	struct mbuf		*tp_m_last;
 
-	struct mbuf_head 	wr_list;
-	struct mbuf_head 	out_of_order_queue;
+	struct mbufq 		wr_list;
+	struct mbufq 		out_of_order_queue;
 };
 
 static inline void
 reset_wr_list(struct toepcb *toep)
 {
-	mbufq_init(&toep->wr_list);
+	mbufq_init(&toep->wr_list, INT_MAX);	/* XXX: sane limit needed */
 }
 
 static inline void
 enqueue_wr(struct toepcb *toep, struct mbuf *m)
 {
-	mbufq_tail(&toep->wr_list, m);
+	(void )mbufq_enqueue(&toep->wr_list, m);
 }
 
 static inline struct mbuf *
 peek_wr(const struct toepcb *toep)
 {
-	return (mbufq_peek(&toep->wr_list));
+	return (mbufq_first(&toep->wr_list));
 }
 
 static inline struct mbuf *

Modified: head/sys/dev/xen/netfront/netfront.c
==============================================================================
--- head/sys/dev/xen/netfront/netfront.c	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/dev/xen/netfront/netfront.c	Thu Feb 19 01:19:42 2015	(r278977)
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/sockio.h>
+#include <sys/limits.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/module.h>
@@ -87,8 +88,6 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/xen/xenvar.h>
 
-#include <dev/xen/netfront/mbufq.h>
-
 #include "xenbus_if.h"
 
 /* Features supported by all backends.  TSO and LRO can be negotiated */
@@ -277,7 +276,7 @@ struct netfront_info {
 	int			rx_ring_ref;
 	uint8_t			mac[ETHER_ADDR_LEN];
 	struct xn_chain_data	xn_cdata;	/* mbufs */
-	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */
+	struct mbufq		xn_rx_batch;	/* batch queue */
 
 	int			xn_if_flags;
 	struct callout	        xn_stat_ch;
@@ -837,7 +836,7 @@ no_mbuf:
 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
 		
 		/* queue the mbufs allocated */
-		mbufq_tail(&sc->xn_rx_batch, m_new);
+		(void )mbufq_enqueue(&sc->xn_rx_batch, m_new);
 	}
 	
 	/*
@@ -973,7 +972,7 @@ xn_rxeof(struct netfront_info *np)
 	RING_IDX i, rp;
 	multicall_entry_t *mcl;
 	struct mbuf *m;
-	struct mbuf_head rxq, errq;
+	struct mbufq rxq, errq;
 	int err, pages_flipped = 0, work_to_do;
 
 	do {
@@ -981,8 +980,9 @@ xn_rxeof(struct netfront_info *np)
 		if (!netfront_carrier_ok(np))
 			return;
 
-		mbufq_init(&errq);
-		mbufq_init(&rxq);
+		/* XXX: there should be some sane limit. */
+		mbufq_init(&errq, INT_MAX);
+		mbufq_init(&rxq, INT_MAX);
 
 		ifp = np->xn_ifp;
 	
@@ -1000,7 +1000,7 @@ xn_rxeof(struct netfront_info *np)
 
 			if (__predict_false(err)) {
 				if (m)
-					mbufq_tail(&errq, m);
+					(void )mbufq_enqueue(&errq, m);
 				np->stats.rx_errors++;
 				continue;
 			}
@@ -1022,7 +1022,7 @@ xn_rxeof(struct netfront_info *np)
 			np->stats.rx_packets++;
 			np->stats.rx_bytes += m->m_pkthdr.len;
 
-			mbufq_tail(&rxq, m);
+			(void )mbufq_enqueue(&rxq, m);
 			np->rx.rsp_cons = i;
 		}
 
@@ -1046,8 +1046,7 @@ xn_rxeof(struct netfront_info *np)
 			}
 		}
 	
-		while ((m = mbufq_dequeue(&errq)))
-			m_freem(m);
+		mbufq_drain(&errq);
 
 		/* 
 		 * Process all the mbufs after the remapping is complete.

Modified: head/sys/sys/mbuf.h
==============================================================================
--- head/sys/sys/mbuf.h	Wed Feb 18 23:34:03 2015	(r278976)
+++ head/sys/sys/mbuf.h	Thu Feb 19 01:19:42 2015	(r278977)
@@ -1199,5 +1199,101 @@ rt_m_getfib(struct mbuf *m)
  #define M_PROFILE(m)
 #endif
 
+struct mbufq {
+	STAILQ_HEAD(, mbuf)	mq_head;
+	int			mq_len;
+	int			mq_maxlen;
+};
 
+static inline void
+mbufq_init(struct mbufq *mq, int maxlen)
+{
+
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_maxlen = maxlen;
+	mq->mq_len = 0;
+}
+
+static inline struct mbuf *
+mbufq_flush(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_len = 0;
+	return (m);
+}
+
+static inline void
+mbufq_drain(struct mbufq *mq)
+{
+	struct mbuf *m, *n;
+
+	n = mbufq_flush(mq);
+	while ((m = n) != NULL) {
+		n = STAILQ_NEXT(m, m_stailqpkt);
+		m_freem(m);
+	}
+}
+
+static inline struct mbuf *
+mbufq_first(const struct mbufq *mq)
+{
+
+	return (STAILQ_FIRST(&mq->mq_head));
+}
+
+static inline struct mbuf *
+mbufq_last(const struct mbufq *mq)
+{
+
+	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
+}
+
+static inline int
+mbufq_full(const struct mbufq *mq)
+{
+
+	return (mq->mq_len >= mq->mq_maxlen);
+}
+
+static inline int
+mbufq_len(const struct mbufq *mq)
+{
+
+	return (mq->mq_len);
+}
+
+static inline int
+mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
+{
+
+	if (mbufq_full(mq))
+		return (ENOBUFS);
+	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+	return (0);
+}
+
+static inline struct mbuf *
+mbufq_dequeue(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	if (m) {
+		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
+		mq->mq_len--;
+	}
+	return (m);
+}
+
+static inline void
+mbufq_prepend(struct mbufq *mq, struct mbuf *m)
+{
+
+	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+}
 #endif /* !_SYS_MBUF_H_ */
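
The drivers converted above initialize their queues with INT_MAX (see the
XXX comments), so discarding mbufq_enqueue()'s return value with a (void)
cast is harmless there.  A caller with a real limit would check the return
instead; a minimal sketch, with the function and counter names made up for
illustration:

#include <sys/param.h>
#include <sys/mbuf.h>

static int
example_bounded_enqueue(struct mbufq *q, struct mbuf *m, u_long *drops)
{
	int error;

	error = mbufq_enqueue(q, m);
	if (error != 0) {
		/* Queue is at mq_maxlen; ownership of m stays with the caller. */
		m_freem(m);
		(*drops)++;
	}
	return (error);
}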


