Date:      Sun, 20 Aug 2023 08:18:03 GMT
From:      Dmitry Salychev <dsl@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 58983e4b0253 - main - dpaa2: Clean up channels in separate tasks
Message-ID:  <202308200818.37K8I3ab017277@gitrepo.freebsd.org>

The branch main has been updated by dsl:

URL: https://cgit.FreeBSD.org/src/commit/?id=58983e4b0253ad38a3e1ef2166fedd3133fdb552

commit 58983e4b0253ad38a3e1ef2166fedd3133fdb552
Author:     Dmitry Salychev <dsl@FreeBSD.org>
AuthorDate: 2023-06-18 15:03:24 +0000
Commit:     Dmitry Salychev <dsl@FreeBSD.org>
CommitDate: 2023-08-20 08:17:26 +0000

    dpaa2: Clean up channels in separate tasks
    
    Each channel gets its own DMA resources, cleanup and "bufferpool"
    tasks, and a separate cleanup taskqueue to isolate channel operation
    as much as possible and avoid various kernel panics under heavy
    network load.
    
    As a side effect of this work, the dpaa2_buf structure is simplified,
    and all of the functions to re-seed those buffers are now gathered in
    the dpaa2_buf.h and .c files; functions to work with channels are
    extracted into the dpaa2_channel.h and .c files as well.
    
    Reported by:            dch
    Reviewed by:            bz
    Approved by:            bz (mentor)
    MFC after:              1 week
    Differential Revision:  https://reviews.freebsd.org/D41296
---
 sys/conf/files.arm64          |    3 +
 sys/dev/dpaa2/dpaa2_buf.c     |  246 +++++++
 sys/dev/dpaa2/dpaa2_buf.h     |  173 +++++
 sys/dev/dpaa2/dpaa2_channel.c |  557 ++++++++++++++++
 sys/dev/dpaa2/dpaa2_channel.h |   95 +++
 sys/dev/dpaa2/dpaa2_io.c      |   23 +-
 sys/dev/dpaa2/dpaa2_io.h      |    8 +-
 sys/dev/dpaa2/dpaa2_mc.c      |   64 --
 sys/dev/dpaa2/dpaa2_mcp.c     |    1 -
 sys/dev/dpaa2/dpaa2_mcp.h     |    1 -
 sys/dev/dpaa2/dpaa2_ni.c      | 1483 ++++++++++++++---------------------------
 sys/dev/dpaa2/dpaa2_ni.h      |  160 +----
 sys/dev/dpaa2/dpaa2_swp.c     |   15 +-
 sys/dev/dpaa2/dpaa2_swp.h     |    5 +-
 sys/dev/dpaa2/dpaa2_types.c   |  114 ++++
 sys/dev/dpaa2/dpaa2_types.h   |  113 ++--
 sys/modules/dpaa2/Makefile    |   14 +-
 17 files changed, 1833 insertions(+), 1242 deletions(-)
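
The heart of the change shows up in dpaa2_chan_setup() further down: every
channel owns a single-threaded cleanup taskqueue whose thread is pinned to
the CPU mask of the channel's DPIO, so cleanup for one channel can never
race cleanup for another. A minimal sketch of that pattern, using the stock
taskqueue(9) API with hypothetical "my_chan"/"my_chan_cleanup" names rather
than the driver's types:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/cpuset.h>
#include <sys/taskqueue.h>

struct my_chan {
	struct task		 cleanup_task;	/* deferred cleanup work */
	struct taskqueue	*cleanup_tq;	/* per-channel, one thread */
};

static void
my_chan_cleanup(void *arg, int pending)
{
	/* Drain and recycle resources of this channel only. */
}

static void
my_chan_init(struct my_chan *ch, const cpuset_t *cpu_mask, int id)
{
	NET_TASK_INIT(&ch->cleanup_task, 0, my_chan_cleanup, ch);
	ch->cleanup_tq = taskqueue_create("my_ch cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &ch->cleanup_tq);
	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
	    cpu_mask, "my_ch%d cleanup", id);
}

A data availability notification (CDAN) for the channel then only has to
taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task), which keeps all of a
channel's cleanup on a single thread.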

diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 61f1cbf75982..2036bbe918ba 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -202,6 +202,8 @@ dev/axgbe/xgbe-phy-v1.c				optional axa fdt
 dev/cpufreq/cpufreq_dt.c			optional cpufreq fdt
 
 dev/dpaa2/dpaa2_bp.c				optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_buf.c				optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_channel.c			optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_cmd_if.m			optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_con.c				optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_console.c			optional soc_nxp_ls dpaa2 fdt
@@ -216,6 +218,7 @@ dev/dpaa2/dpaa2_ni.c				optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_rc.c				optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_swp.c				optional soc_nxp_ls dpaa2
 dev/dpaa2/dpaa2_swp_if.m			optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_types.c				optional soc_nxp_ls dpaa2
 dev/dpaa2/memac_mdio_acpi.c			optional soc_nxp_ls dpaa2 acpi
 dev/dpaa2/memac_mdio_common.c			optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt
 dev/dpaa2/memac_mdio_fdt.c			optional soc_nxp_ls dpaa2 fdt
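
These files.arm64 entries tie the new sources to the same build knobs as the
rest of the driver; a kernel configuration that compiles them in would carry
something along these lines (the exact spelling in the stock arm64 GENERIC
config may differ):

options 	SOC_NXP_LS	# NXP Layerscape SoCs
device		dpaa2		# Data Path Acceleration Architecture, 2nd gen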
diff --git a/sys/dev/dpaa2/dpaa2_buf.c b/sys/dev/dpaa2/dpaa2_buf.c
new file mode 100644
index 000000000000..7739eda5d8de
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.c
@@ -0,0 +1,246 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_ni.h"
+
+MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");
+
+/**
+ * @brief Allocate Rx buffers visible to QBMan and release them to the
+ * buffer pool.
+ */
+int
+dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
+    int size, struct mtx *dma_mtx)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
+	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
+	struct dpaa2_buf *buf;
+	const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
+	const uint16_t bpid = bpsc->attr.bpid;
+	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
+	int error, bufn = 0;
+
+#if defined(INVARIANTS)
+	KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
+	if (dma_mtx != NULL) {
+		mtx_assert(dma_mtx, MA_OWNED);
+	}
+#endif /* INVARIANTS */
+
+#ifdef _notyet_
+	/* Limit the number of buffers released to the pool */
+	count = (alloc + count > DPAA2_NI_BUFS_MAX)
+	    ? DPAA2_NI_BUFS_MAX - alloc : count;
+#endif
+
+	/* Release "count" buffers to the pool */
+	for (int i = alloc; i < alloc + count; i++) {
+		/* Enough buffers were allocated for a single command */
+		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
+			error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
+			    bufn);
+			if (error) {
+				device_printf(sc->dev, "%s: failed to release "
+				    "buffers to the pool (1)\n", __func__);
+				return (error);
+			}
+			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+			bufn = 0;
+		}
+
+		buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
+		if (buf == NULL) {
+			device_printf(dev, "%s: malloc() failed\n", __func__);
+			return (ENOMEM);
+		}
+		DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);
+
+		error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
+		if (error != 0) {
+			device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
+			    "error=%d\n", __func__, error);
+			break;
+		}
+		paddr[bufn] = buf->paddr;
+		bufn++;
+	}
+
+	/* Release the remainder of the buffers to the pool */
+	if (bufn > 0) {
+		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
+		if (error) {
+			device_printf(sc->dev, "%s: failed to release "
+			    "buffers to the pool (2)\n", __func__);
+			return (error);
+		}
+		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+	}
+
+	return (0);
+}
+
+/**
+ * @brief Prepare an Rx buffer to be released to the buffer pool.
+ */
+int
+dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
+    struct mtx *dma_mtx)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_fa *fa;
+	bool map_created = false;
+	bool mbuf_alloc = false;
+	int error;
+
+#if defined(INVARIANTS)
+	DPAA2_BUF_ASSERT_RXPREP(buf);
+	if (dma_mtx != NULL) {
+		mtx_assert(dma_mtx, MA_OWNED);
+	}
+#endif /* INVARIANTS */
+
+	if (__predict_false(buf->dmap == NULL)) {
+		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: failed to create DMA map: "
+			    "error=%d\n", __func__, error);
+			goto fail_map_create;
+		}
+		map_created = true;
+	}
+
+	if (__predict_true(buf->m == NULL)) {
+		buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
+		if (__predict_false(buf->m == NULL)) {
+			device_printf(dev, "%s: m_getjcl() failed\n", __func__);
+			error = ENOMEM;
+			goto fail_mbuf_alloc;
+		}
+		buf->m->m_len = buf->m->m_ext.ext_size;
+		buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
+		mbuf_alloc = true;
+	}
+
+	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
+	    &buf->nseg, BUS_DMA_NOWAIT);
+	KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
+	    buf->nseg));
+	KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
+	    __func__, error));
+	if (__predict_false(error != 0 || buf->nseg != 1)) {
+		device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
+		    "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
+		goto fail_mbuf_map;
+	}
+	buf->paddr = buf->seg.ds_addr;
+	buf->vaddr = buf->m->m_data;
+
+	/* Populate frame annotation for future use */
+	fa = (struct dpaa2_fa *)buf->vaddr;
+	fa->magic = DPAA2_MAGIC;
+	fa->buf = buf;
+
+	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);
+
+	DPAA2_BUF_ASSERT_RXREADY(buf);
+
+	return (0);
+
+fail_mbuf_map:
+	if (mbuf_alloc) {
+		m_freem(buf->m);
+		buf->m = NULL;
+	}
+fail_mbuf_alloc:
+	if (map_created) {
+		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+	}
+fail_map_create:
+	return (error);
+}
+
+/**
+ * @brief Prepare a Tx buffer to be added to the Tx ring.
+ */
+int
+dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
+{
+	struct dpaa2_buf *sgt = buf->sgt;
+	bool map_created = false;
+	int error;
+
+	DPAA2_BUF_ASSERT_TXPREP(buf);
+
+	if (buf->dmap == NULL) {
+		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: bus_dmamap_create() failed: "
+			    "error=%d\n", __func__, error);
+			goto fail_map_create;
+		}
+		map_created = true;
+	}
+
+	if (sgt->vaddr == NULL) {
+		error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
+		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: bus_dmamem_alloc() failed: "
+			    "error=%d\n", __func__, error);
+			goto fail_mem_alloc;
+		}
+	}
+
+	DPAA2_BUF_ASSERT_TXREADY(buf);
+
+	return (0);
+
+fail_mem_alloc:
+	if (map_created) {
+		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+	}
+fail_map_create:
+	return (error);
+}
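
Note how dpaa2_buf_seed_rxb() above plants a frame annotation (a magic value
plus a back-pointer) at the head of every Rx buffer; that is what lets the
cleanup path map a dequeued frame back to its dpaa2_buf. A sketch of the
reverse lookup, written as a hypothetical helper (not driver code) and
assuming the driver's struct dpaa2_fa and DPAA2_MAGIC definitions, given a
buffer's kernel virtual address:

static struct dpaa2_buf *
fa_to_buf(void *vaddr)
{
	struct dpaa2_fa *fa = (struct dpaa2_fa *)vaddr;

	/* The annotation was written by dpaa2_buf_seed_rxb(). */
	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	return (fa->buf);
}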
diff --git a/sys/dev/dpaa2/dpaa2_buf.h b/sys/dev/dpaa2/dpaa2_buf.h
new file mode 100644
index 000000000000..853a4fa78d3a
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.h
@@ -0,0 +1,173 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef	_DPAA2_BUF_H
+#define	_DPAA2_BUF_H
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#define DPAA2_RX_BUF_SIZE	(MJUM9BYTES)
+
+struct dpaa2_buf {
+	bus_addr_t		 paddr;
+	caddr_t			 vaddr;
+	bus_dma_tag_t		 dmat;
+	bus_dmamap_t		 dmap;
+	bus_dma_segment_t	 seg;
+	int			 nseg;
+	struct mbuf		*m;
+	struct dpaa2_buf	*sgt;
+	void			*opt;
+};
+
+#define DPAA2_BUF_INIT_TAGOPT(__buf, __tag, __opt) do {			\
+	KASSERT((__buf) != NULL, ("%s: buf is NULL", __func__));	\
+									\
+	(__buf)->paddr = 0;						\
+	(__buf)->vaddr = NULL;						\
+	(__buf)->dmat = (__tag);					\
+	(__buf)->dmap = NULL;						\
+	(__buf)->seg.ds_addr = 0;					\
+	(__buf)->seg.ds_len = 0;					\
+	(__buf)->nseg = 0;						\
+	(__buf)->m = NULL;						\
+	(__buf)->sgt = NULL;						\
+	(__buf)->opt = (__opt);						\
+} while(0)
+#define DPAA2_BUF_INIT(__buf)	DPAA2_BUF_INIT_TAGOPT((__buf), NULL, NULL)
+
+#if defined(INVARIANTS)
+/*
+ * TXPREP/TXREADY macros allow one to verify whether a Tx buffer is
+ * prepared to be seeded and/or ready to be used for transmission.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
+ */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do {				\
+	struct dpaa2_buf *__sgt = (__buf)->sgt;				\
+	KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__));	\
+									\
+	KASSERT((__buf)->paddr == 0,    ("%s: paddr set?", __func__));	\
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__));	\
+	KASSERT((__buf)->dmat  != NULL, ("%s: no DMA tag?", __func__));	\
+	KASSERT((__buf)->dmap  == NULL, ("%s: DMA map set?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len  == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg  == 0,    ("%s: nseg > 0?", __func__));	\
+	KASSERT((__buf)->m     == NULL, ("%s: mbuf set?", __func__));	\
+	KASSERT((__buf)->opt   != NULL, ("%s: no Tx ring?", __func__));	\
+									\
+	KASSERT((__sgt)->paddr == 0,    ("%s: S/G paddr set?", __func__)); \
+	KASSERT((__sgt)->vaddr == NULL, ("%s: S/G vaddr set?", __func__)); \
+	KASSERT((__sgt)->dmat  != NULL, ("%s: no S/G DMA tag?", __func__)); \
+	KASSERT((__sgt)->dmap  == NULL, ("%s: S/G DMA map set?", __func__)); \
+	KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->seg.ds_len  == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->nseg  == 0,    ("%s: S/G nseg > 0?", __func__)); \
+	KASSERT((__sgt)->m     == NULL, ("%s: S/G mbuf set?", __func__)); \
+	KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do {				\
+	struct dpaa2_buf *__sgt = (__buf)->sgt;				\
+	KASSERT((__sgt) != NULL,        ("%s: no S/G table?", __func__)); \
+									\
+	KASSERT((__buf)->paddr == 0,    ("%s: paddr set?", __func__));	\
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__));	\
+	KASSERT((__buf)->dmat  != NULL, ("%s: no DMA tag?", __func__));	\
+	KASSERT((__buf)->dmap  != NULL, ("%s: no DMA map?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len  == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg  == 0,    ("%s: nseg > 0?", __func__));	\
+	KASSERT((__buf)->m     == NULL, ("%s: mbuf set?", __func__));	\
+	KASSERT((__buf)->opt   != NULL, ("%s: no Tx ring?", __func__));	\
+									\
+	KASSERT((__sgt)->paddr == 0,    ("%s: S/G paddr set?", __func__)); \
+	KASSERT((__sgt)->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); \
+	KASSERT((__sgt)->dmat  != NULL, ("%s: no S/G DMA tag?", __func__)); \
+	KASSERT((__sgt)->dmap  != NULL, ("%s: no S/G DMA map?", __func__)); \
+	KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->seg.ds_len  == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->nseg  == 0,    ("%s: S/G nseg > 0?", __func__)); \
+	KASSERT((__sgt)->m     == NULL, ("%s: S/G mbuf set?", __func__)); \
+	KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do {	\
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do {	\
+} while(0)
+#endif /* INVARIANTS */
+
+#if defined(INVARIANTS)
+/*
+ * RXPREP/RXREADY macros allow one to verify whether an Rx buffer is
+ * prepared to be seeded and/or ready to be used for reception.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
+ */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do {				\
+	KASSERT((__buf)->paddr == 0,    ("%s: paddr set?", __func__));	\
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__));	\
+	KASSERT((__buf)->dmat  != NULL, ("%s: no DMA tag?", __func__));	\
+	/* KASSERT((__buf)->dmap  == NULL, ("%s: DMA map set?", __func__)); */ \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len  == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg  == 0,    ("%s: nseg > 0?", __func__));	\
+	KASSERT((__buf)->m     == NULL, ("%s: mbuf set?", __func__));	\
+	KASSERT((__buf)->sgt   == NULL, ("%s: S/G table set?", __func__)); \
+	KASSERT((__buf)->opt   != NULL, ("%s: no channel?", __func__));	\
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do {				\
+	KASSERT((__buf)->paddr != 0,    ("%s: paddr not set?", __func__)); \
+	KASSERT((__buf)->vaddr != NULL, ("%s: vaddr not set?", __func__)); \
+	KASSERT((__buf)->dmat  != NULL, ("%s: no DMA tag?", __func__));	\
+	KASSERT((__buf)->dmap  != NULL, ("%s: no DMA map?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr != 0, ("%s: not mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len  != 0, ("%s: not mapped?", __func__)); \
+	KASSERT((__buf)->nseg  == 1,    ("%s: nseg != 1?", __func__));	\
+	KASSERT((__buf)->m     != NULL, ("%s: no mbuf?", __func__));	\
+	KASSERT((__buf)->sgt   == NULL, ("%s: S/G table set?", __func__)); \
+	KASSERT((__buf)->opt   != NULL, ("%s: no channel?", __func__));	\
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do {	\
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do {	\
+} while(0)
+#endif /* INVARIANTS */
+
+int dpaa2_buf_seed_pool(device_t, device_t, void *, uint32_t, int, struct mtx *);
+int dpaa2_buf_seed_rxb(device_t, struct dpaa2_buf *, int, struct mtx *);
+int dpaa2_buf_seed_txb(device_t, struct dpaa2_buf *);
+
+#endif /* _DPAA2_BUF_H */
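
Putting the header's pieces together: a caller initializes a dpaa2_buf
against a channel's Rx DMA tag with DPAA2_BUF_INIT_TAGOPT() and then seeds
it, holding the channel's DMA mutex whenever one is passed (the seeding
functions assert ownership). A minimal sketch, where "ch" is a configured
struct dpaa2_channel and "dev" its network interface device; both names and
the helper itself are placeholders, not driver API:

static int
seed_one_rxb(device_t dev, struct dpaa2_channel *ch)
{
	struct dpaa2_buf *buf;
	int error;

	buf = malloc(sizeof(struct dpaa2_buf), M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
	DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);

	mtx_lock(&ch->dma_mtx);
	error = dpaa2_buf_seed_rxb(dev, buf, DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
	mtx_unlock(&ch->dma_mtx);
	if (error != 0)
		free(buf, M_DEVBUF);
	return (error);
}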
diff --git a/sys/dev/dpaa2/dpaa2_channel.c b/sys/dev/dpaa2/dpaa2_channel.c
new file mode 100644
index 000000000000..87b76923a16d
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_channel.c
@@ -0,0 +1,557 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
+ *
+ * NOTE: Several work queues (WQs) are organized into a single channel.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/mbuf.h>
+#include <sys/taskqueue.h>
+#include <sys/sysctl.h>
+#include <sys/buf_ring.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+#include <machine/vmparam.h>
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_io.h"
+#include "dpaa2_con.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_cmd_if.h"
+
+MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");
+
+#define RX_SEG_N		 (1u)
+#define RX_SEG_SZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+#define RX_SEG_MAXSZ	 	 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define TX_SEG_N		 (16u) /* XXX-DSL: does DPAA2 limit exist? */
+#define TX_SEG_SZ		 (PAGE_SIZE)
+#define TX_SEG_MAXSZ	 	 (TX_SEG_N * TX_SEG_SZ)
+CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define SGT_SEG_N		 (1u)
+#define SGT_SEG_SZ		 (PAGE_SIZE)
+#define SGT_SEG_MAXSZ	 	 (PAGE_SIZE)
+CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
+
+static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
+static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
+    int, bus_size_t);
+static void dpaa2_chan_bp_task(void *, int);
+
+/**
+ * @brief Configures a QBMan channel and registers data availability notifications.
+ */
+int
+dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
+    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
+{
+	device_t pdev = device_get_parent(dev);
+	device_t child = dev;
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
+	struct dpaa2_con_softc *consc = device_get_softc(condev);
+	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
+	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
+	struct dpaa2_con_notif_cfg notif_cfg;
+	struct dpaa2_io_notif_ctx *ctx;
+	struct dpaa2_channel *ch = NULL;
+	struct dpaa2_cmd cmd;
+	uint16_t rctk, contk;
+	int error;
+
+	DPAA2_CMD_INIT(&cmd);
+
+	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
+	if (error) {
+		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
+		    __func__, rcinfo->id, error);
+		goto fail_rc_open;
+	}
+	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
+	if (error) {
+		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
+		    __func__, coninfo->id, error);
+		goto fail_con_open;
+	}
+
+	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
+	if (error) {
+		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
+		    "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
+		goto fail_con_enable;
+	}
+
+	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
+	if (ch == NULL) {
+		device_printf(dev, "%s: malloc() failed\n", __func__);
+		error = ENOMEM;
+		goto fail_malloc;
+	}
+
+	ch->ni_dev = dev;
+	ch->io_dev = iodev;
+	ch->con_dev = condev;
+	ch->id = consc->attr.chan_id;
+	ch->flowid = flowid;
+	ch->tx_frames = 0; /* for debug purposes */
+	ch->tx_dropped = 0; /* for debug purposes */
+	ch->store_sz = 0;
+	ch->store_idx = 0;
+	ch->recycled_n = 0;
+	ch->rxq_n = 0;
+
+	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
+	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);
+
+	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
+	    taskqueue_thread_enqueue, &ch->cleanup_tq);
+	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
+	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);
+
+	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
+	if (error != 0) {
+		device_printf(dev, "%s: failed to setup DMA\n", __func__);
+		goto fail_dma_setup;
+	}
+
+	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);
+
+	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
+	    &ch->xmit_mtx);
+	if (ch->xmit_br == NULL) {
+		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
+		error = ENOMEM;
+		goto fail_buf_ring;
+	}
+
+	DPAA2_BUF_INIT(&ch->store);
+
+	/* Register the new notification context */
+	ctx = &ch->ctx;
+	ctx->qman_ctx = (uint64_t)ctx;
+	ctx->cdan_en = true;
+	ctx->fq_chan_id = ch->id;
+	ctx->io_dev = ch->io_dev;
+	ctx->channel = ch;
+	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
+	if (error) {
+		device_printf(dev, "%s: failed to register CDAN context\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	}
+
+	/* Register DPCON notification within Management Complex */
+	notif_cfg.dpio_id = ioinfo->id;
+	notif_cfg.prior = 0;
+	notif_cfg.qman_ctx = ctx->qman_ctx;
+	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
+	if (error) {
+		device_printf(dev, "%s: failed to register DPCON "
+		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
+		    coninfo->id, consc->attr.chan_id);
+		goto fail_dpcon_notif;
+	}
+
+	/* Allocate an initial number of Rx buffers and the channel storage */
+	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
+	    DPAA2_RX_BUF_SIZE, NULL);
+	if (error) {
+		device_printf(dev, "%s: failed to seed buffer pool\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	}
+	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
+	    BUS_DMA_NOWAIT, sc->buf_align);
+	if (error != 0) {
+		device_printf(dev, "%s: failed to allocate channel storage\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	} else {
+		ch->store_sz = DPAA2_ETH_STORE_FRAMES;
+	}
+
+	/* Prepare queues for the channel */
+	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
+	if (error) {
+		device_printf(dev, "%s: failed to prepare TxConf queue: "
+		    "error=%d\n", __func__, error);
+		goto fail_fq_setup;
+	}
+	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
+	if (error) {
+		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
+		    __func__, error);
+		goto fail_fq_setup;
+	}
+
+	if (bootverbose) {
+		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
+		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
+		    consc->attr.prior_num);
+	}
+
+	*channel = ch;
+
+	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
+	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+
+	return (0);
+
+fail_fq_setup:
+	if (ch->store.vaddr != NULL) {
+		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
+	}
+	if (ch->store.dmat != NULL) {
+		bus_dma_tag_destroy(ch->store.dmat);
+	}
+	ch->store.dmat = NULL;
+	ch->store.vaddr = NULL;
+	ch->store.paddr = 0;
+	ch->store.nseg = 0;
+fail_dpcon_notif:
+	buf_ring_free(ch->xmit_br, M_DEVBUF);
+fail_buf_ring:
+	mtx_destroy(&ch->xmit_mtx);
+fail_dma_setup:
+	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
+	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
+	/* } */
+	/* taskqueue_free(ch->cleanup_tq); */
+fail_malloc:
+	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_enable:
+	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_open:
+	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+fail_rc_open:
+	return (error);
+}
+
+/**
+ * @brief Performs an initial configuration of the frame queue.
+ */
+int
+dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
+    enum dpaa2_ni_queue_type queue_type)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_ni_fq *fq;
+
+	switch (queue_type) {
+	case DPAA2_NI_QUEUE_TX_CONF:
+		/* One queue per channel */
+		fq = &ch->txc_queue;
+		fq->chan = ch;
+		fq->flowid = ch->flowid;
+		fq->tc = 0; /* ignored */
+		fq->type = queue_type;
+		break;
+	case DPAA2_NI_QUEUE_RX:
+		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
+		    ("too many Rx traffic classes: rx_tcs=%d\n",
+		    sc->attr.num.rx_tcs));
+
+		/* One queue per Rx traffic class within a channel */
+		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
+			fq = &ch->rx_queues[i];
+			fq->chan = ch;
+			fq->flowid = ch->flowid;
+			fq->tc = (uint8_t) i;
+			fq->type = queue_type;
+
+			ch->rxq_n++;
+		}
+		break;
+	case DPAA2_NI_QUEUE_RX_ERR:
+		/* One queue per network interface */
+		fq = &sc->rxe_queue;
+		fq->chan = ch;
+		fq->flowid = 0; /* ignored */
+		fq->tc = 0; /* ignored */
+		fq->type = queue_type;
+		break;
+	default:
+		device_printf(dev, "%s: unexpected frame queue type: %d\n",
+		    __func__, queue_type);
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+/**
+ * @brief Obtain the next dequeue response from the channel storage.
+ */
+int
+dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
+{
+	struct dpaa2_buf *buf = &ch->store;
+	struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
+	struct dpaa2_dq *msg = &msgs[ch->store_idx];
+	int rc = EINPROGRESS;
+
+	ch->store_idx++;
+
+	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
+		rc = EALREADY; /* VDQ command is expired */
+		ch->store_idx = 0;
+		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
+			msg = NULL; /* Null response, FD is invalid */
+		}
+	}
+	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
+		rc = ENOENT; /* FQ is empty */
+		ch->store_idx = 0;
+	}
+
+	if (dq != NULL) {
+		*dq = msg;
+	}
+
+	return (rc);
+}
+
+static int
+dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
+    bus_size_t alignment)
+{
+	int error;
+
+	mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);
+
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(dev),	/* parent */
+	    alignment, 0,		/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
+	    NULL, NULL,			/* filter, filterarg */
+	    RX_SEG_MAXSZ,		/* maxsize */
+	    RX_SEG_N,			/* nsegments */
+	    RX_SEG_SZ,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockarg */
+	    &ch->rx_dmat);
+	if (error) {
+		device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
+		goto fail_rx_tag;
+	}
+
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(dev),	/* parent */
+	    alignment, 0,		/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
+	    NULL, NULL,			/* filter, filterarg */
+	    TX_SEG_MAXSZ,		/* maxsize */
+	    TX_SEG_N,			/* nsegments */
+	    TX_SEG_SZ,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockarg */
+	    &ch->tx_dmat);
+	if (error) {
+		device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
+		goto fail_tx_tag;
+	}
+
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(dev),	/* parent */
+	    alignment, 0,		/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
+	    NULL, NULL,			/* filter, filterarg */
+	    SGT_SEG_MAXSZ,		/* maxsize */
+	    SGT_SEG_N,			/* nsegments */
+	    SGT_SEG_SZ,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockarg */
+	    &ch->sgt_dmat);
+	if (error) {
+		device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
+		goto fail_sgt_tag;
+	}
+
+	return (0);
+
+fail_sgt_tag:
+	bus_dma_tag_destroy(ch->tx_dmat);
+fail_tx_tag:
+	bus_dma_tag_destroy(ch->rx_dmat);
+fail_rx_tag:
+	mtx_destroy(&ch->dma_mtx);
+	ch->rx_dmat = NULL;
+	ch->tx_dmat = NULL;
+	ch->sgt_dmat = NULL;
+
+	return (error);
+}
+
+/**
+ * @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
+ */
+static int
+dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
+    int mapflags, bus_size_t alignment)
+{
+	struct dpaa2_buf *buf = &ch->store;
+	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
+	int error;
+
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(dev),	/* parent */
+	    alignment, 0,		/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
+	    NULL, NULL,			/* filter, filterarg */
+	    maxsize,			/* maxsize */
+	    1,				/* nsegments */
+	    maxsize,			/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,		/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockarg */
+	    &buf->dmat);
+	if (error != 0) {
+		device_printf(dev, "%s: failed to create DMA tag\n", __func__);
+		goto fail_tag;
*** 3106 LINES SKIPPED ***
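
The dequeue side of the channel deserves a closing illustration.
dpaa2_chan_next_frame() above returns EINPROGRESS while more responses may
follow, EALREADY once the VDQ command has expired (the returned message may
then be NULL), and ENOENT when the frame queue is empty. A sketch of a
consumer loop built on that contract, with a placeholder frame handler that
is not part of the driver:

static void
sketch_process_fd(struct dpaa2_channel *ch, struct dpaa2_fd *fd)
{
	/* Rx- or Tx-confirmation-specific handling would go here. */
}

static void
sketch_drain(struct dpaa2_channel *ch)
{
	struct dpaa2_dq *dq;
	int error;

	for (;;) {
		error = dpaa2_chan_next_frame(ch, &dq);
		/* Consume only responses that actually carry a frame. */
		if (dq != NULL &&
		    (dq->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME) != 0)
			sketch_process_fd(ch, &dq->fdr.fd);
		if (error != EINPROGRESS)
			break;	/* EALREADY: VDQ expired; ENOENT: FQ empty */
	}
}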