> +			goto fail_hdr_dmamap_destroy;
> +
> +	error = sglist_append_phys(sg, hdr_paddr, sc->vtnet_hdr_size);
> 	if (error != 0 || sg->sg_nseg != 1) {
> 		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
> 		    __func__, error, sg->sg_nseg));
> -		goto fail;
> +		goto fail_hdr_dmamap_unload;
> 	}
>
> -	error = sglist_append_mbuf(sg, m);
> +	bus_dmamap_sync(sc->vtnet_tx_dmat, hdr_dmap, BUS_DMASYNC_PREWRITE);
> +
> +	error = bus_dmamap_create(sc->vtnet_tx_dmat, 0, &dmap);
> +	if (error)
> +		goto fail_hdr_dmamap_unload;
> +
> +	nsegs = 0;
> +	error = bus_dmamap_load_mbuf_sg(sc->vtnet_tx_dmat, dmap, m, segs,
> +	    &nsegs, BUS_DMA_NOWAIT);
> +	if (error != 0)
> +		goto fail_dmamap_destroy;
> +	KASSERT(nsegs <= sc->vtnet_tx_nsegs,
> +	    ("%s: unexpected number of DMA segments for tx buffer: %d (max %d)",
> +	    __func__, nsegs, sc->vtnet_tx_nsegs));
> +
> +	bus_dmamap_sync(sc->vtnet_tx_dmat, dmap, BUS_DMASYNC_PREWRITE);
> +
> +	for (i = 0; i < nsegs && !error; i++)
> +		error = sglist_append_phys(sg, segs[i].ds_addr, segs[i].ds_len);
> +
> 	if (error) {
> +		sglist_reset(sg);
> +		bus_dmamap_unload(sc->vtnet_tx_dmat, dmap);
> +
> +		error = sglist_append_phys(sg, hdr_paddr, sc->vtnet_hdr_size);
> +		if (error != 0 || sg->sg_nseg != 1) {
> +			KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
> +			    __func__, error, sg->sg_nseg));
> +			goto fail_dmamap_destroy;
> +		}
> +
> 		m = m_defrag(m, M_NOWAIT);
> 		if (m == NULL) {
> 			sc->vtnet_stats.tx_defrag_failed++;
> @@ -2545,16 +2761,41 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
> 		*m_head = m;
> 		sc->vtnet_stats.tx_defragged++;
>
> -		error = sglist_append_mbuf(sg, m);
> +		nsegs = 0;
> +		error = bus_dmamap_load_mbuf_sg(sc->vtnet_tx_dmat, dmap, m,
> +		    segs, &nsegs, BUS_DMA_NOWAIT);
> +		if (error != 0)
> +			goto fail_dmamap_destroy;
> +		KASSERT(nsegs <= sc->vtnet_tx_nsegs,
> +		    ("%s: unexpected number of DMA segments for tx buffer: %d (max %d)",
> +		    __func__, nsegs, sc->vtnet_tx_nsegs));
> +
> +		bus_dmamap_sync(sc->vtnet_tx_dmat, dmap, BUS_DMASYNC_PREWRITE);
> +
> +		for (i = 0; i < nsegs && !error; i++)
> +			error = sglist_append_phys(sg, segs[i].ds_addr,
> +			    segs[i].ds_len);
> +
> 		if (error)
> -			goto fail;
> +			goto fail_dmamap_unload;
> 	}
>
> 	txhdr->vth_mbuf = m;
> +	txhdr->dmap = dmap;
> +	txhdr->hdr_dmap = hdr_dmap;
> +
> 	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
>
> 	return (error);
>
> +fail_dmamap_unload:
> +	bus_dmamap_unload(sc->vtnet_tx_dmat, dmap);
> +fail_dmamap_destroy:
> +	bus_dmamap_destroy(sc->vtnet_tx_dmat, dmap);
> +fail_hdr_dmamap_unload:
> +	bus_dmamap_unload(sc->vtnet_tx_dmat, hdr_dmap);
> +fail_hdr_dmamap_destroy:
> +	bus_dmamap_destroy(sc->vtnet_tx_dmat, hdr_dmap);
> fail:
> 	m_freem(*m_head);
> 	*m_head = NULL;
> diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
> index eb5e6784b07f..6cafe827d733 100644
> --- a/sys/dev/virtio/network/if_vtnetvar.h
> +++ b/sys/dev/virtio/network/if_vtnetvar.h
> @@ -190,6 +190,12 @@ struct vtnet_softc {
> 	struct mtx vtnet_mtx;
> 	char vtnet_mtx_name[16];
> 	uint8_t vtnet_hwaddr[ETHER_ADDR_LEN];
> +
> +	bus_dma_tag_t vtnet_rx_dmat;
> +	struct mtx vtnet_rx_mtx;
> +
> +	bus_dma_tag_t vtnet_tx_dmat;
> +	struct mtx vtnet_tx_mtx;
> };
> /* vtnet flag descriptions for use with printf(9) %b identifier. */
> #define VTNET_FLAGS_BITS \
> @@ -273,6 +279,10 @@ struct vtnet_tx_header {
> 	} vth_uhdr;
>
> 	struct mbuf *vth_mbuf;
> +
> +	bus_dmamap_t dmap;
> +
> +	bus_dmamap_t hdr_dmap;
> };
>
> /*

I believe this is causing a massive memory leak in the devbuf malloc type. I can't even get through a build over NFS on a VM without hitting OOM. Reverting b5bad6df467cc95bea641afe674c55cd5b9f1510 and c16c95192f01237a876eb7bc336e3bbda9310171 fixes the leak. Can you please fix or revert this?

You can monitor the leak with something like vmstat -m | grep devbuf

> @@ -1562,13 +1643,43 @@ vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
> 	}
>
> 	m->m_len = size;
> +	vthdr = (struct vtnet_rx_buffer_header *)m->m_data;
> +
> +	/* Reserve space for header */
> +	m_adj(m, VTNET_RX_BUFFER_HEADER_OFFSET);
> +
> 	/*
> 	 * Need to offset the mbuf if the header we're going to add
> 	 * will misalign.
> 	 */
> -	if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) {
> +	if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0)
> 		m_adj(m, VTNET_ETHER_ALIGN);
> +
> +	err = bus_dmamap_create(sc->vtnet_rx_dmat, 0, &dmap);
> +	if (err) {
> +		printf("Failed to create dmamap, err :%d\n",
> +		    err);
> +		m_freem(m);
> +		return (NULL);
> 	}
> +
> +	nsegs = 0;
> +	err = bus_dmamap_load_mbuf_sg(sc->vtnet_rx_dmat, dmap, m, segs,
> +	    &nsegs, BUS_DMA_NOWAIT);

Where are the bus_dmamap_unload and bus_dmamap_destroy calls for the rx bufs?

Ryan