Date: Sat, 29 Jun 2019 00:50:25 +0000 (UTC)
From: John Baldwin <jhb@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r349531 - head/sys/kern
Message-ID: <201906290050.x5T0oPJg033332@repo.freebsd.org>
Author: jhb
Date: Sat Jun 29 00:50:25 2019
New Revision: 349531

URL: https://svnweb.freebsd.org/changeset/base/349531

Log:
  Compress pending socket buffer data once it is marked ready.

  Apply similar logic from sbcompress to pending data in the socket
  buffer once it is marked ready via sbready.  Normally sbcompress
  merges small mbufs to reduce the length of mbuf chains in the socket
  buffer.  However, sbcompress cannot do this for mbufs marked
  M_NOTREADY.  sbready_compress is now called from sbready when mbufs
  are marked ready to merge small mbuf chains once the data is
  available to copy.

  Submitted by:	gallatin (earlier version)
  Reviewed by:	gallatin, hselasky, rrs
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D20616

Modified:
  head/sys/kern/uipc_sockbuf.c

Modified: head/sys/kern/uipc_sockbuf.c
==============================================================================
--- head/sys/kern/uipc_sockbuf.c	Sat Jun 29 00:49:35 2019	(r349530)
+++ head/sys/kern/uipc_sockbuf.c	Sat Jun 29 00:50:25 2019	(r349531)
@@ -89,6 +89,78 @@ sbm_clrprotoflags(struct mbuf *m, int flags)
 }
 
 /*
+ * Compress M_NOTREADY mbufs after they have been readied by sbready().
+ *
+ * sbcompress() skips M_NOTREADY mbufs since the data is not available to
+ * be copied at the time of sbcompress().  This function combines small
+ * mbufs similar to sbcompress() once mbufs are ready.  'm0' is the first
+ * mbuf sbready() marked ready, and 'end' is the first mbuf still not
+ * ready.
+ */
+static void
+sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
+{
+	struct mbuf *m, *n;
+	int ext_size;
+
+	SOCKBUF_LOCK_ASSERT(sb);
+
+	if ((sb->sb_flags & SB_NOCOALESCE) != 0)
+		return;
+
+	for (m = m0; m != end; m = m->m_next) {
+		MPASS((m->m_flags & M_NOTREADY) == 0);
+
+		/* Compress small unmapped mbufs into plain mbufs. */
+		if ((m->m_flags & M_NOMAP) && m->m_len <= MLEN) {
+			MPASS(m->m_flags & M_EXT);
+			ext_size = m->m_ext.ext_size;
+			if (mb_unmapped_compress(m) == 0) {
+				sb->sb_mbcnt -= ext_size;
+				sb->sb_ccnt -= 1;
+			}
+		}
+
+		/*
+		 * NB: In sbcompress(), 'n' is the last mbuf in the
+		 * socket buffer and 'm' is the new mbuf being copied
+		 * into the trailing space of 'n'.  Here, the roles
+		 * are reversed and 'n' is the next mbuf after 'm'
+		 * that is being copied into the trailing space of
+		 * 'm'.
+		 */
+		n = m->m_next;
+		while ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
+		    M_WRITABLE(m) &&
+		    (m->m_flags & M_NOMAP) == 0 &&
+		    n->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
+		    n->m_len <= M_TRAILINGSPACE(m) &&
+		    m->m_type == n->m_type) {
+			KASSERT(sb->sb_lastrecord != n,
+			    ("%s: merging start of record (%p) into previous mbuf (%p)",
+			    __func__, n, m));
+			m_copydata(n, 0, n->m_len, mtodo(m, m->m_len));
+			m->m_len += n->m_len;
+			m->m_next = n->m_next;
+			m->m_flags |= n->m_flags & M_EOR;
+			if (sb->sb_mbtail == n)
+				sb->sb_mbtail = m;
+
+			sb->sb_mbcnt -= MSIZE;
+			sb->sb_mcnt -= 1;
+			if (n->m_flags & M_EXT) {
+				sb->sb_mbcnt -= n->m_ext.ext_size;
+				sb->sb_ccnt -= 1;
+			}
+			m_free(n);
+			n = m->m_next;
+		}
+	}
+	SBLASTRECORDCHK(sb);
+	SBLASTMBUFCHK(sb);
+}
+
+/*
  * Mark ready "count" units of I/O starting with "m".  Most mbufs
  * count as a single unit of I/O except for EXT_PGS-backed mbufs which
  * can be backed by multiple pages.
@@ -138,6 +210,7 @@ sbready(struct sockbuf *sb, struct mbuf *m0, int count
 	}
 
 	if (!blocker) {
+		sbready_compress(sb, m0, m);
 		return (EINPROGRESS);
 	}
 
@@ -150,6 +223,7 @@ sbready(struct sockbuf *sb, struct mbuf *m0, int count
 	}
 
 	sb->sb_fnrdy = m;
+	sbready_compress(sb, m0, m);
 	return (0);
 }
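For readers who want to see the coalescing idea in isolation, here is a minimal
userspace C sketch of the same pass: walk a chain of buffers and absorb each
small successor into its predecessor's trailing space.  It is an illustration
only, under assumed names (struct buf, compress_chain, BUF_CAP, COPY_LIMIT);
it is not the kernel code above, and it omits everything sbready_compress()
must handle (M_EOR, unmapped/M_NOMAP mbufs, writability checks, and the
sockbuf accounting).

/*
 * Userspace sketch of the coalescing loop in sbready_compress().
 * Names and thresholds are illustrative assumptions, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_CAP 256			/* stand-in for an mbuf's data area */
#define COPY_LIMIT (BUF_CAP / 4)	/* don't copy "too much", as in the XXX note */

struct buf {
	struct buf *next;
	size_t len;
	char data[BUF_CAP];
};

/*
 * Merge each small buffer into the trailing space of its predecessor,
 * mirroring the inner while loop above: keep absorbing the next buffer
 * while it is small enough and fits in the remaining space.
 */
static void
compress_chain(struct buf *head)
{
	struct buf *m, *n;

	for (m = head; m != NULL; m = m->next) {
		n = m->next;
		while (n != NULL && n->len <= COPY_LIMIT &&
		    n->len <= BUF_CAP - m->len) {
			memcpy(m->data + m->len, n->data, n->len);
			m->len += n->len;
			m->next = n->next;
			free(n);
			n = m->next;
		}
	}
}

static struct buf *
mkbuf(const char *s)
{
	struct buf *b = calloc(1, sizeof(*b));

	b->len = strlen(s);
	memcpy(b->data, s, b->len);
	return (b);
}

int
main(void)
{
	/* Three small buffers that would otherwise stay as separate links. */
	struct buf *head = mkbuf("hello ");
	head->next = mkbuf("socket ");
	head->next->next = mkbuf("buffer");

	compress_chain(head);

	/* After compression the chain is a single 19-byte buffer. */
	for (struct buf *b = head; b != NULL; b = b->next)
		printf("len=%zu data=%.*s\n", b->len, (int)b->len, b->data);
	free(head);
	return (0);
}

Built with a plain C99 compiler, this collapses the three small buffers into
one, which is the same effect sbready_compress() has on a chain of small
mbufs once sbready() marks them ready.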