Date: Tue, 20 Aug 2002 15:36:38 -0400 (EDT)
From: Andrew Gallatin <gallatin@cs.duke.edu>
To: Luigi Rizzo <rizzo@icir.org>
Cc: freebsd-net@FreeBSD.ORG
Subject: Re: m_getcl and end-to-end performance
Message-ID: <15714.39494.661931.882244@grasshopper.cs.duke.edu>
In-Reply-To: <20020820093939.B48541@iguana.icir.org>
References: <15714.27671.533860.408996@grasshopper.cs.duke.edu> <20020820093939.B48541@iguana.icir.org>
Luigi Rizzo writes:
>
> now if you have patches i'll be happy to have a look at them.
Here's what I'm running with now. It removes the M_PKTHDR
requirement, allowing me to use multiple m_getcl()'s to stock jumbo
frames.
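
Roughly the sort of driver-side use this enables (a sketch only: the
helper name is made up, and error unwinding / DMA mapping are left out):

/*
 * Hypothetical driver helper: build a receive buffer big enough for a
 * 'len'-byte jumbo frame out of several mbuf+cluster pairs.  Only the
 * first mbuf carries M_PKTHDR; the rest are plain clusters, which the
 * patch below lets m_getcl() serve from mcl_pool as well.
 */
static struct mbuf *
jumbo_rx_alloc(int len)
{
	struct mbuf *top, *m, *tail;
	int left;

	top = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (top == NULL)
		return (NULL);
	tail = top;
	/* Chain on plain clusters until the buffer can hold 'len' bytes. */
	for (left = len - MCLBYTES; left > 0; left -= MCLBYTES) {
		m = m_getcl(M_DONTWAIT, MT_DATA, 0);
		if (m == NULL) {
			m_freem(top);
			return (NULL);
		}
		tail->m_next = m;
		tail = m;
	}
	/* m_len and m_pkthdr.len get fixed up once a frame actually arrives. */
	return (top);
}

The point is just that the buffers past the first no longer need
M_PKTHDR, so they too can come out of mcl_pool.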
The uipc_socket.c change is just a cheesy hack to soreceive() so as to
pretend we do mcl_pool restocking in m_free().
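
For background (this is not part of the diff): the mcl_pool restocking
lives only in m_freem(), while soreceive() frees consumed mbufs one at
a time with m_free(), so those clusters never find their way back into
the pool.  A real fix would teach m_free() itself to restock; a
hypothetical version living in uipc_mbuf.c, reusing the same tests as
the patched m_freem() below, might look like this:

/*
 * Hypothetical m_free()-style routine that restocks mcl_pool directly.
 * Not part of the patch; the soreceive() hunk below approximates it by
 * detaching the head mbuf and handing it to m_freem().
 */
struct mbuf *
m_free_pooled(struct mbuf *m)
{
	struct mbuf *n = m->m_next;
	int s = splimp();

	if (mcl_pool_now < mcl_pool_max && (m->m_flags & M_EXT) &&
	    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		m->m_next = NULL;
		m->m_nextpkt = mcl_pool;
		mcl_pool = m;
		mcl_pool_now++;
		splx(s);
		return (n);
	}
	splx(s);
	return (m_free(m));
}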
For a netperf UDP_STREAM, I see a ~1,000 to ~2,000 packet/sec increase
in throughput for message sizes of 256 bytes through 2K. For large
messages (8K) I see a ~20-30Mb/sec increase (but that's only a few
hundred pkts/sec).
Drew
Index: uipc_mbuf.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.51.2.20
diff -u -r1.51.2.20 uipc_mbuf.c
--- uipc_mbuf.c 12 Aug 2002 22:09:12 -0000 1.51.2.20
+++ uipc_mbuf.c 20 Aug 2002 19:08:38 -0000
@@ -569,20 +569,22 @@
 	int s = splimp();
 	struct mbuf *mp;
 
+	if (type == MT_DATA && mcl_pool) {
+		mp = mcl_pool;
+		mcl_pool = mp->m_nextpkt;
+		mcl_pool_now--;
+		splx(s);
+		mp->m_nextpkt = NULL;
+		mp->m_data = mp->m_ext.ext_buf;
+		mp->m_flags = flags|M_EXT;
+		mp->m_pkthdr.rcvif = NULL;
+		mp->m_pkthdr.csum_flags = 0;
+		mp->m_pkthdr.aux = NULL;
+		return mp;
+	}
+
 	if (flags & M_PKTHDR) {
-		if (type == MT_DATA && mcl_pool) {
-			mp = mcl_pool;
-			mcl_pool = mp->m_nextpkt;
-			mcl_pool_now--;
-			splx(s);
-			mp->m_nextpkt = NULL;
-			mp->m_data = mp->m_ext.ext_buf;
-			mp->m_flags = M_PKTHDR|M_EXT;
-			mp->m_pkthdr.rcvif = NULL;
-			mp->m_pkthdr.csum_flags = 0;
-			return mp;
-		} else
-			MGETHDR(mp, how, type);
+		MGETHDR(mp, how, type);
 	} else
 		MGET(mp, how, type);
 	if (mp) {
@@ -704,32 +706,36 @@
 m_freem(m)
 	struct mbuf *m;
 {
+	struct mbuf *m_tmp;
 	int s = splimp();
 
 	/*
 	 * Try to keep a small pool of mbuf+cluster for quick use in
 	 * device drivers. A good candidate is a M_PKTHDR buffer with
-	 * only one cluster attached. Other mbufs, or those exceeding
+	 * at least one cluster attached. Other mbufs, or those exceeding
 	 * the pool size, are just m_free'd in the usual way.
 	 * The following code makes sure that m_next, m_type,
 	 * m_pkthdr.aux and m_ext.* are properly initialized.
 	 * Other fields in the mbuf are initialized in m_getcl()
 	 * upon allocation.
 	 */
-	if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
-	    (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
+	if (mcl_pool_now < mcl_pool_max && m &&
+	    (m->m_flags & M_EXT) == M_EXT &&
 	    m->m_type == MT_DATA && M_EXT_WRITABLE(m) ) {
-		if (m->m_pkthdr.aux) {
+		m_tmp = m->m_next;
+		m->m_next = NULL;
+		if ((m->m_flags & M_PKTHDR) == M_PKTHDR &&
+		    m->m_pkthdr.aux) {
 			m_freem(m->m_pkthdr.aux);
-			m->m_pkthdr.aux = NULL;
 		}
 		m->m_nextpkt = mcl_pool;
 		mcl_pool = m;
 		mcl_pool_now++;
-	} else {
-		while (m)
-			m = m_free(m);
-	}
+		m = m_tmp;
+	}
+	while (m)
+		m = m_free(m);
+
 	splx(s);
 }
 
Index: uipc_socket.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/uipc_socket.c,v
retrieving revision 1.68.2.21
diff -u -r1.68.2.21 uipc_socket.c
--- uipc_socket.c 1 May 2002 03:27:35 -0000 1.68.2.21
+++ uipc_socket.c 20 Aug 2002 16:14:06 -0000
@@ -865,7 +865,13 @@
 				so->so_rcv.sb_mb = m = m->m_next;
 				*mp = (struct mbuf *)0;
 			} else {
-				so->so_rcv.sb_mb = m = m_free(m);
+				struct mbuf *m_tmp;
+
+				m_tmp = m->m_next;
+				m->m_next = NULL;
+/*				so->so_rcv.sb_mb = m = m_free(m);*/
+				m_freem (m);
+				so->so_rcv.sb_mb = m = m_tmp;
 			}
 			if (m)
 				m->m_nextpkt = nextrecord;
