From owner-svn-src-all@FreeBSD.ORG Tue Apr 19 22:08:29 2011
Delivered-To: svn-src-all@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
    by hub.freebsd.org (Postfix) with ESMTP id F3646106564A;
    Tue, 19 Apr 2011 22:08:28 +0000 (UTC) (envelope-from np@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
    by mx1.freebsd.org (Postfix) with ESMTP id E234B8FC1A;
    Tue, 19 Apr 2011 22:08:28 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
    by svn.freebsd.org (8.14.3/8.14.3) with ESMTP id p3JM8SaO024011;
    Tue, 19 Apr 2011 22:08:28 GMT (envelope-from np@svn.freebsd.org)
Received: (from np@localhost)
    by svn.freebsd.org (8.14.3/8.14.3/Submit) id p3JM8SZf024007;
    Tue, 19 Apr 2011 22:08:28 GMT (envelope-from np@svn.freebsd.org)
Message-Id: <201104192208.p3JM8SZf024007@svn.freebsd.org>
From: Navdeep Parhar
Date: Tue, 19 Apr 2011 22:08:28 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-head@freebsd.org
X-SVN-Group: head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r220873 - head/sys/dev/cxgbe
X-BeenThere: svn-src-all@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: SVN commit messages for the entire src tree (except for "user" and "projects")
X-List-Received-Date: Tue, 19 Apr 2011 22:08:29 -0000

Author: np
Date: Tue Apr 19 22:08:28 2011
New Revision: 220873
URL: http://svn.freebsd.org/changeset/base/220873

Log:
  - Move all Ethernet specific items from sge_eq to sge_txq.  sge_eq is now
    a suitable base for all kinds of egress queues.
  - Add control queues (sge_ctrlq) and allocate one of these per hardware
    channel.  They can be used to program filters and steer traffic (and
    more).

  MFC after:    1 week

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_sge.c
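Note: with this split, struct sge_eq ends up embedded as the first member of
both sge_txq and the new sge_ctrlq (the diff marks it "MUST be first"), so a
pointer to the embedded eq and a pointer to its container name the same
address and generic egress-queue code can be shared. A minimal userspace
sketch of that pattern, using hypothetical stand-in structs rather than the
driver's real definitions:

    #include <assert.h>
    #include <stddef.h>

    /* Stripped-down stand-ins for the driver structs (illustrative only). */
    struct sge_eq { unsigned int cap, avail; };
    struct sge_txq {
        struct sge_eq eq;   /* MUST be first */
        void *br;           /* Ethernet-only state lives in the container */
    };

    int
    main(void)
    {
        struct sge_txq txq = { .eq = { 64, 63 } };
        struct sge_eq *eq = &txq.eq;

        /* First-member embedding: both pointers refer to the same object. */
        assert((void *)eq == (void *)&txq);
        assert(offsetof(struct sge_txq, eq) == 0);
        return (0);
    }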
Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h    Tue Apr 19 20:44:44 2011    (r220872)
+++ head/sys/dev/cxgbe/adapter.h    Tue Apr 19 22:08:28 2011    (r220873)
@@ -110,6 +110,9 @@ enum {
     FW_IQ_QSIZE = 256,
     FW_IQ_ESIZE = 64,   /* At least 64 mandated by the firmware spec */
 
+    CTRL_EQ_QSIZE = 128,
+    CTRL_EQ_ESIZE = 64,
+
     RX_IQ_QSIZE = 1024,
     RX_IQ_ESIZE = 64,   /* At least 64 so CPL_RX_PKT will fit */
@@ -218,7 +221,7 @@ struct tx_map {
 
 struct tx_sdesc {
     uint8_t desc_used;  /* # of hardware descriptors used by the WR */
-    uint8_t map_used;   /* # of frames sent out in the WR */
+    uint8_t credits;    /* NIC txq: # of frames sent out in the WR */
 };
 
 typedef void (iq_intr_handler_t)(void *);
@@ -275,7 +278,6 @@ enum {
  * consumes them) but it's special enough to have its own struct (see sge_fl).
  */
 struct sge_eq {
-    bus_dma_tag_t tx_tag;   /* tag for transmit buffers */
     bus_dma_tag_t desc_tag;
     bus_dmamap_t desc_map;
     char lockname[16];
@@ -284,8 +286,6 @@ struct sge_eq {
 
     struct tx_desc *desc;   /* KVA of descriptor ring */
     bus_addr_t ba;          /* bus address of descriptor ring */
-    struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
-    struct buf_ring *br;    /* tx buffer ring */
     struct sge_qstat *spg;  /* status page, for convenience */
     uint16_t cap;   /* max # of desc, for convenience */
     uint16_t avail; /* available descriptors, for convenience */
@@ -295,14 +295,7 @@ struct sge_eq {
     uint16_t pending;   /* # of descriptors used since last doorbell */
     uint16_t iqid;      /* iq that gets egr_update for the eq */
     uint32_t cntxt_id;  /* SGE context id for the eq */
-
-    /* DMA maps used for tx */
-    struct tx_map *maps;
-    uint32_t map_total; /* # of DMA maps */
-    uint32_t map_pidx;  /* next map to be used */
-    uint32_t map_cidx;  /* reclaimed up to this index */
-    uint32_t map_avail; /* # of available maps */
-} __aligned(CACHE_LINE_SIZE);
+};
 
 struct sge_fl {
     bus_dma_tag_t desc_tag;
     bus_dmamap_t desc_map;
@@ -325,13 +318,23 @@ struct sge_fl {
     unsigned int dmamap_failed;
 };
 
-/* txq: SGE egress queue + miscellaneous items */
+/* txq: SGE egress queue + what's needed for Ethernet NIC */
 struct sge_txq {
     struct sge_eq eq;   /* MUST be first */
+
+    struct ifnet *ifp;      /* the interface this txq belongs to */
+    bus_dma_tag_t tx_tag;   /* tag for transmit buffers */
+    struct buf_ring *br;    /* tx buffer ring */
+    struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
+
     struct mbuf *m;     /* held up due to temporary resource shortage */
     struct task resume_tx;
-    struct ifnet *ifp;  /* the interface this txq belongs to */
+
+    /* DMA maps used for tx */
+    struct tx_map *maps;
+    uint32_t map_total; /* # of DMA maps */
+    uint32_t map_pidx;  /* next map to be used */
+    uint32_t map_cidx;  /* reclaimed up to this index */
+    uint32_t map_avail; /* # of available maps */
 
     /* stats for common events first */
 
@@ -349,11 +352,12 @@ struct sge_txq {
     uint32_t no_dmamap; /* no DMA map to load the mbuf */
     uint32_t no_desc;   /* out of hardware descriptors */
     uint32_t egr_update;    /* # of SGE_EGR_UPDATE notifications for txq */
-};
+} __aligned(CACHE_LINE_SIZE);
 
 enum {
     RXQ_LRO_ENABLED = (1 << 0)
 };
+
 /* rxq: SGE ingress queue + SGE free list + miscellaneous items */
 struct sge_rxq {
     struct sge_iq iq;   /* MUST be first */
@@ -374,6 +378,20 @@ struct sge_rxq {
 
 } __aligned(CACHE_LINE_SIZE);
 
+/* ctrlq: SGE egress queue + stats for control queue */
+struct sge_ctrlq {
+    struct sge_eq eq;   /* MUST be first */
+
+    /* stats for common events first */
+
+    uint64_t total_wrs; /* # of work requests sent down this queue */
+
+    /* stats for not-that-common events */
+
+    uint32_t no_desc;   /* out of hardware descriptors */
+    uint32_t too_long;  /* WR longer than hardware max */
+} __aligned(CACHE_LINE_SIZE);
+
 struct sge {
     uint16_t timer_val[SGE_NTIMERS];
     uint8_t counter_val[SGE_NCOUNTERS];
@@ -384,6 +402,7 @@ struct sge {
     int neq;    /* total egress queues */
 
     struct sge_iq fwq;      /* Firmware event queue */
+    struct sge_ctrlq *ctrlq;/* Control queues */
     struct sge_iq *fiq;     /* Forwarded interrupt queues (INTR_FWD) */
     struct sge_txq *txq;    /* NIC tx queues */
     struct sge_rxq *rxq;    /* NIC rx queues */
@@ -436,6 +455,9 @@ struct adapter {
     struct adapter_params params;
     struct t4_virt_res vres;
 
+    struct sysctl_ctx_list ctx; /* from first_port_up to last_port_down */
+    struct sysctl_oid *oid_ctrlq;
+
     struct mtx sc_lock;
     char lockname[16];
 };
@@ -572,8 +594,8 @@ void t4_sge_modload(void);
 void t4_sge_init(struct adapter *);
 int t4_create_dma_tag(struct adapter *);
 int t4_destroy_dma_tag(struct adapter *);
-int t4_setup_adapter_iqs(struct adapter *);
-int t4_teardown_adapter_iqs(struct adapter *);
+int t4_setup_adapter_queues(struct adapter *);
+int t4_teardown_adapter_queues(struct adapter *);
 int t4_setup_eth_queues(struct port_info *);
 int t4_teardown_eth_queues(struct port_info *);
 void t4_intr_all(void *);
@@ -583,6 +605,7 @@ void t4_intr_evt(void *);
 void t4_intr_data(void *);
 void t4_evt_rx(void *);
 void t4_eth_rx(void *);
+int t4_mgmt_tx(struct adapter *, struct mbuf *);
 int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
 void t4_update_fl_bufsize(struct ifnet *);

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c    Tue Apr 19 20:44:44 2011    (r220872)
+++ head/sys/dev/cxgbe/t4_main.c    Tue Apr 19 22:08:28 2011    (r220873)
@@ -543,7 +543,8 @@ t4_attach(device_t dev)
     s = &sc->sge;
     s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
     s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
-    s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
+    s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
+    s->neq += NCHAN;            /* control queues, 1 per hw channel */
     s->niq = s->nrxq + 1;       /* 1 extra for firmware event queue */
     if (iaq.intr_fwd) {
         sc->flags |= INTR_FWD;
@@ -551,6 +552,8 @@ t4_attach(device_t dev)
         s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE,
             M_ZERO | M_WAITOK);
     }
+    s->ctrlq = malloc(NCHAN * sizeof(struct sge_ctrlq), M_CXGBE,
+        M_ZERO | M_WAITOK);
     s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
         M_ZERO | M_WAITOK);
     s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
@@ -653,6 +656,7 @@ t4_detach(device_t dev)
     free(sc->irq, M_CXGBE);
     free(sc->sge.rxq, M_CXGBE);
     free(sc->sge.txq, M_CXGBE);
+    free(sc->sge.ctrlq, M_CXGBE);
    free(sc->sge.fiq, M_CXGBE);
     free(sc->sge.iqmap, M_CXGBE);
     free(sc->sge.eqmap, M_CXGBE);
@@ -992,7 +996,7 @@ cxgbe_transmit(struct ifnet *ifp, struct
     if (m->m_flags & M_FLOWID)
         txq += (m->m_pkthdr.flowid % pi->ntxq);
 
-    br = txq->eq.br;
+    br = txq->br;
 
     if (TXQ_TRYLOCK(txq) == 0) {
         /*
@@ -1047,7 +1051,7 @@ cxgbe_qflush(struct ifnet *ifp)
         for_each_txq(pi, i, txq) {
             TXQ_LOCK(txq);
             m_freem(txq->m);
-            while ((m = buf_ring_dequeue_sc(txq->eq.br)) != NULL)
+            while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
                 m_freem(m);
             TXQ_UNLOCK(txq);
         }
@@ -1894,9 +1898,9 @@ first_port_up(struct adapter *sc)
     ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
     /*
-     * The firmware event queue and the optional forwarded interrupt queues.
+     * queues that belong to the adapter (not any particular port).
     */
-    rc = t4_setup_adapter_iqs(sc);
+    rc = t4_setup_adapter_queues(sc);
     if (rc != 0)
         goto done;
@@ -1963,7 +1967,7 @@ last_port_down(struct adapter *sc)
 
     t4_intr_disable(sc);
 
-    t4_teardown_adapter_iqs(sc);
+    t4_teardown_adapter_queues(sc);
 
     for (i = 0; i < sc->intr_count; i++)
         t4_free_irq(sc, &sc->irq[i]);
@@ -2278,7 +2282,7 @@ cxgbe_tick(void *arg)
     drops = s->tx_drop;
     for_each_txq(pi, i, txq)
-        drops += txq->eq.br->br_drops;
+        drops += txq->br->br_drops;
     ifp->if_snd.ifq_drops = drops;
 
     ifp->if_oerrors = s->tx_error_frames;
@@ -2674,7 +2678,7 @@ txq_start(struct ifnet *ifp, struct sge_
 
     TXQ_LOCK_ASSERT_OWNED(txq);
 
-    br = txq->eq.br;
+    br = txq->br;
     m = txq->m ? txq->m : drbr_dequeue(ifp, br);
     if (m)
         t4_eth_tx(ifp, txq, m);

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c    Tue Apr 19 20:44:44 2011    (r220872)
+++ head/sys/dev/cxgbe/t4_sge.c    Tue Apr 19 22:08:28 2011    (r220873)
@@ -94,7 +94,7 @@ struct sgl {
 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
     int, iq_intr_handler_t *, char *);
 static inline void init_fl(struct sge_fl *, int, char *);
-static inline void init_txq(struct sge_txq *, int, char *);
+static inline void init_eq(struct sge_eq *, int, char *);
 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
     bus_addr_t *, void **);
 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
@@ -106,6 +106,8 @@ static int alloc_iq(struct sge_iq *, int
 static int free_iq(struct sge_iq *);
 static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
 static int free_rxq(struct port_info *, struct sge_rxq *);
+static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
+static int free_ctrlq(struct adapter *, struct sge_ctrlq *);
 static int alloc_txq(struct port_info *, struct sge_txq *, int);
 static int free_txq(struct port_info *, struct sge_txq *);
 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
@@ -115,8 +117,8 @@ static inline void ring_fl_db(struct ada
 static void refill_fl(struct sge_fl *, int);
 static int alloc_fl_sdesc(struct sge_fl *);
 static void free_fl_sdesc(struct sge_fl *);
-static int alloc_eq_maps(struct sge_eq *);
-static void free_eq_maps(struct sge_eq *);
+static int alloc_tx_maps(struct sge_txq *);
+static void free_tx_maps(struct sge_txq *);
 static void set_fl_tag_idx(struct sge_fl *, int);
 static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
@@ -130,14 +132,16 @@ static inline void write_ulp_cpl_sgl(str
     struct txpkts *, struct mbuf *, struct sgl *);
 static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
-static inline void ring_tx_db(struct adapter *, struct sge_eq *);
+static inline void ring_eq_db(struct adapter *, struct sge_eq *);
 static inline int reclaimable(struct sge_eq *);
-static int reclaim_tx_descs(struct sge_eq *, int, int);
+static int reclaim_tx_descs(struct sge_txq *, int, int);
 static void write_eqflush_wr(struct sge_eq *);
 static __be64 get_flit(bus_dma_segment_t *, int, int);
 static int handle_sge_egr_update(struct adapter *,
     const struct cpl_sge_egr_update *);
 
+static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
+
 /*
  * Called on MOD_LOAD and fills up fl_buf_info[].
  */
@@ -235,23 +239,32 @@ t4_destroy_dma_tag(struct adapter *sc)
 }
 
 /*
- * Allocate and initialize the firmware event queue and the forwarded interrupt
- * queues, if any.  The adapter owns all these queues as they are not associated
- * with any particular port.
+ * Allocate and initialize the firmware event queue, control queues, and the
+ * forwarded interrupt queues (if any).  The adapter owns all these queues as
+ * they are not associated with any particular port.
  *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
 int
-t4_setup_adapter_iqs(struct adapter *sc)
+t4_setup_adapter_queues(struct adapter *sc)
 {
     int i, rc;
     struct sge_iq *iq, *fwq;
+    struct sge_ctrlq *ctrlq;
     iq_intr_handler_t *handler;
     char name[16];
 
     ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
+    if (sysctl_ctx_init(&sc->ctx) == 0) {
+        struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
+        struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+
+        sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
+            "ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
+    }
+
     fwq = &sc->sge.fwq;
     if (sc->flags & INTR_FWD) {
         iq = &sc->sge.fiq[0];
@@ -292,6 +305,25 @@ t4_setup_adapter_iqs(struct adapter *sc
     if (rc != 0) {
         device_printf(sc->dev,
             "failed to create firmware event queue: %d\n", rc);
+
+        return (rc);
+    }
+
+    /*
+     * Control queues - one per hardware channel.
+     */
+    ctrlq = &sc->sge.ctrlq[0];
+    for (i = 0; i < NCHAN; i++, ctrlq++) {
+        snprintf(name, sizeof(name), "%s ctrlq%d",
+            device_get_nameunit(sc->dev), i);
+        init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);
+
+        rc = alloc_ctrlq(sc, ctrlq, i);
+        if (rc != 0) {
+            device_printf(sc->dev,
+                "failed to create control queue %d: %d\n", i, rc);
+            return (rc);
+        }
     }
 
     return (rc);
@@ -301,13 +333,22 @@ t4_setup_adapter_iqs(struct adapter *sc
  * Idempotent
  */
 int
-t4_teardown_adapter_iqs(struct adapter *sc)
+t4_teardown_adapter_queues(struct adapter *sc)
 {
     int i;
     struct sge_iq *iq;
 
     ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
+    /* Do this before freeing the queues */
+    if (sc->oid_ctrlq) {
+        sysctl_ctx_free(&sc->ctx);
+        sc->oid_ctrlq = NULL;
+    }
+
+    for (i = 0; i < NCHAN; i++)
+        free_ctrlq(sc, &sc->sge.ctrlq[i]);
+
     iq = &sc->sge.fwq;
     free_iq(iq);
     if (sc->flags & INTR_FWD) {
@@ -367,7 +408,7 @@ t4_setup_eth_queues(struct port_info *pi
         snprintf(name, sizeof(name), "%s txq%d",
             device_get_nameunit(pi->dev), i);
-        init_txq(txq, pi->qsize_txq, name);
+        init_eq(&txq->eq, pi->qsize_txq, name);
 
         rc = alloc_txq(pi, txq, i);
         if (rc != 0)
@@ -758,6 +799,12 @@ nextdesc: ndescs++;
 
     FL_UNLOCK(fl);
 }
 
+int
+t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
+{
+    return ctrl_tx(sc, &sc->sge.ctrlq[0], m);
+}
+
 /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
 #define TXPKTS_PKT_HDR ((\
     sizeof(struct ulp_txpkt) + \
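Note: t4_mgmt_tx() above is the new public entry point for pushing a work
request at the hardware through control queue 0. No caller appears in this
diff; a hypothetical consumer (send_fw_wr() below is illustrative only, not a
real driver function) might wrap a pre-built firmware WR in an mbuf with
standard mbuf(9) calls, something like:

    /*
     * Illustrative only: hand a pre-built firmware work request to the
     * control queue.  ctrl_tx() copies the WR into the descriptor ring
     * and frees the mbuf on success.
     */
    static int
    send_fw_wr(struct adapter *sc, const void *wr, int len)
    {
        struct mbuf *m;

        if (len > MHLEN)
            return (EMSGSIZE);  /* keep the sketch to a single mbuf */

        m = m_gethdr(M_DONTWAIT, MT_DATA);
        if (m == NULL)
            return (ENOMEM);

        bcopy(wr, mtod(m, caddr_t), len);
        m->m_pkthdr.len = m->m_len = len;

        return (t4_mgmt_tx(sc, m));
    }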
@@ -789,7 +836,7 @@ t4_eth_tx(struct ifnet *ifp, struct sge_
     struct port_info *pi = (void *)ifp->if_softc;
     struct adapter *sc = pi->adapter;
     struct sge_eq *eq = &txq->eq;
-    struct buf_ring *br = eq->br;
+    struct buf_ring *br = txq->br;
     struct mbuf *next;
     int rc, coalescing, can_reclaim;
     struct txpkts txpkts;
@@ -799,13 +846,13 @@ t4_eth_tx(struct ifnet *ifp, struct sge_
     KASSERT(m, ("%s: called with nothing to do.", __func__));
 
     prefetch(&eq->desc[eq->pidx]);
-    prefetch(&eq->sdesc[eq->pidx]);
+    prefetch(&txq->sdesc[eq->pidx]);
 
     txpkts.npkt = 0;/* indicates there's nothing in txpkts */
     coalescing = 0;
 
     if (eq->avail < 8)
-        reclaim_tx_descs(eq, 0, 8);
+        reclaim_tx_descs(txq, 0, 8);
 
     for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
@@ -861,7 +908,7 @@ t4_eth_tx(struct ifnet *ifp, struct sge_
             coalescing = 0;
 
             if (eq->avail < 8)
-                reclaim_tx_descs(eq, 0, 8);
+                reclaim_tx_descs(txq, 0, 8);
             rc = write_txpkt_wr(pi, txq, m, &sgl);
             if (rc != 0) {
@@ -887,11 +934,11 @@ t4_eth_tx(struct ifnet *ifp, struct sge_
 doorbell:
         /* Fewer and fewer doorbells as the queue fills up */
         if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
-            ring_tx_db(sc, eq);
+            ring_eq_db(sc, eq);
 
         can_reclaim = reclaimable(eq);
         if (can_reclaim >= 32)
-            reclaim_tx_descs(eq, can_reclaim, 32);
+            reclaim_tx_descs(txq, can_reclaim, 32);
     }
 
     if (txpkts.npkt > 0)
@@ -907,16 +954,21 @@ doorbell:
     * WR that reduced it to 0 so we don't need another flush (we don't have
     * any descriptor for a flush WR anyway, duh).
     */
-    if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED))
+    if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) {
+        struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
+
+        txsd->desc_used = 1;
+        txsd->credits = 0;
         write_eqflush_wr(eq);
+    }
     txq->m = m;
 
     if (eq->pending)
-        ring_tx_db(sc, eq);
+        ring_eq_db(sc, eq);
 
     can_reclaim = reclaimable(eq);
     if (can_reclaim >= 32)
-        reclaim_tx_descs(eq, can_reclaim, 128);
+        reclaim_tx_descs(txq, can_reclaim, 128);
 
     return (0);
 }
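Note: the doorbell pacing in t4_eth_tx() above,
eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)), batches more
descriptors per doorbell write as the ring fills up. A standalone model of
just that arithmetic (userspace sketch; fls() reimplemented here and the
queue size picked arbitrarily):

    #include <stdio.h>

    /* Userspace stand-in for the kernel's fls(): 1-based index of the
     * most significant set bit, 0 if no bit is set. */
    static int
    fls_model(int x)
    {
        int i = 0;

        while (x != 0) {
            i++;
            x >>= 1;
        }
        return (i);
    }

    int
    main(void)
    {
        int qsize = 1024, in_use;

        for (in_use = 1; in_use < qsize; in_use *= 4) {
            /* Doorbell fires once this many descriptors are pending. */
            int thresh = 1 << (fls_model(in_use) / 2);

            printf("%4d in use -> doorbell every %2d descriptors\n",
                in_use, thresh);
        }
        return (0);
    }

A nearly idle ring gets a doorbell for every descriptor or two (low latency);
a busy ring amortizes one register write across many descriptors (less PCI
traffic).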
@@ -970,10 +1022,10 @@ init_fl(struct sge_fl *fl, int qsize, ch
 }
 
 static inline void
-init_txq(struct sge_txq *txq, int qsize, char *name)
+init_eq(struct sge_eq *eq, int qsize, char *name)
 {
-    txq->eq.qsize = qsize;
-    strlcpy(txq->eq.lockname, name, sizeof(txq->eq.lockname));
+    eq->qsize = qsize;
+    strlcpy(eq->lockname, name, sizeof(eq->lockname));
 }
 
 static int
@@ -1333,6 +1385,110 @@ free_rxq(struct port_info *pi, struct sg
 }
 
 static int
+alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
+{
+    int rc, cntxt_id;
+    size_t len;
+    struct fw_eq_ctrl_cmd c;
+    struct sge_eq *eq = &ctrlq->eq;
+    char name[16];
+    struct sysctl_oid *oid;
+    struct sysctl_oid_list *children;
+
+    mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
+
+    len = eq->qsize * CTRL_EQ_ESIZE;
+    rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
+        &eq->ba, (void **)&eq->desc);
+    if (rc)
+        return (rc);
+
+    eq->cap = eq->qsize - SPG_LEN / CTRL_EQ_ESIZE;
+    eq->spg = (void *)&eq->desc[eq->cap];
+    eq->avail = eq->cap - 1;    /* one less to avoid cidx = pidx */
+    eq->iqid = sc->sge.fwq.cntxt_id;
+
+    bzero(&c, sizeof(c));
+
+    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+        F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
+        V_FW_EQ_CTRL_CMD_VFN(0));
+    c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+        F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
+    c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
+    c.physeqid_pkd = htobe32(0);
+    c.fetchszm_to_iqid =
+        htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
+            V_FW_EQ_CTRL_CMD_PCIECHN(idx) |
+            V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
+    c.dcaen_to_eqsize =
+        htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+            V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+            V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
+            V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
+    c.eqaddr = htobe64(eq->ba);
+
+    rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
+    if (rc != 0) {
+        device_printf(sc->dev,
+            "failed to create control queue %d: %d\n", idx, rc);
+        return (rc);
+    }
+
+    eq->pidx = eq->cidx = 0;
+    eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
+    eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
+
+    cntxt_id = eq->cntxt_id - sc->sge.eq_start;
+    KASSERT(cntxt_id < sc->sge.neq,
+        ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
+        cntxt_id, sc->sge.neq - 1));
+    sc->sge.eqmap[cntxt_id] = eq;
+
+    children = SYSCTL_CHILDREN(sc->oid_ctrlq);
+
+    snprintf(name, sizeof(name), "%d", idx);
+    oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+        NULL, "ctrl queue");
+    children = SYSCTL_CHILDREN(oid);
+
+    SYSCTL_ADD_UQUAD(&sc->ctx, children, OID_AUTO, "total_wrs", CTLFLAG_RD,
+        &ctrlq->total_wrs, "total # of work requests");
+    SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
+        &ctrlq->no_desc, 0,
+        "# of times ctrlq ran out of hardware descriptors");
+    SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "too_long", CTLFLAG_RD,
+        &ctrlq->too_long, 0, "# of oversized work requests");
+
+    return (rc);
+}
+
+static int
+free_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq)
+{
+    int rc;
+    struct sge_eq *eq = &ctrlq->eq;
+
+    if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
+        rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
+        if (rc != 0) {
+            device_printf(sc->dev,
+                "failed to free ctrl queue %p: %d\n", eq, rc);
+            return (rc);
+        }
+        eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
+    }
+
+    free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
+
+    if (mtx_initialized(&eq->eq_lock))
+        mtx_destroy(&eq->eq_lock);
+
+    bzero(ctrlq, sizeof(*ctrlq));
+    return (0);
+}
+
+static int
 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
 {
     int rc, cntxt_id;
@@ -1358,21 +1514,21 @@ alloc_txq(struct port_info *pi, struct s
     eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
     eq->spg = (void *)&eq->desc[eq->cap];
     eq->avail = eq->cap - 1;    /* one less to avoid cidx = pidx */
-    eq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
+    txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
         M_ZERO | M_WAITOK);
-    eq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
+    txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
     eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id;
 
     rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
         BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
-        BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &eq->tx_tag);
+        BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
     if (rc != 0) {
         device_printf(sc->dev,
             "failed to create tx DMA tag: %d\n", rc);
         return (rc);
     }
 
-    rc = alloc_eq_maps(eq);
+    rc = alloc_tx_maps(txq);
     if (rc != 0) {
         device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
         return (rc);
     }
@@ -1488,15 +1644,15 @@ free_txq(struct port_info *pi, struct sg
 
     free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
 
-    free(eq->sdesc, M_CXGBE);
+    free(txq->sdesc, M_CXGBE);
 
-    if (eq->maps)
-        free_eq_maps(eq);
+    if (txq->maps)
+        free_tx_maps(txq);
 
-    buf_ring_free(eq->br, M_CXGBE);
+    buf_ring_free(txq->br, M_CXGBE);
 
-    if (eq->tx_tag)
-        bus_dma_tag_destroy(eq->tx_tag);
+    if (txq->tx_tag)
+        bus_dma_tag_destroy(txq->tx_tag);
 
     if (mtx_initialized(&eq->eq_lock))
         mtx_destroy(&eq->eq_lock);
@@ -1718,7 +1874,7 @@ free_fl_sdesc(struct sge_fl *fl)
 }
 
 static int
-alloc_eq_maps(struct sge_eq *eq)
+alloc_tx_maps(struct sge_txq *txq)
 {
     struct tx_map *txm;
     int i, rc, count;
@@ -1728,16 +1884,16 @@ alloc_eq_maps(struct sge_eq *eq)
     * limit for any WR).  txq->no_dmamap events shouldn't occur if maps is
     * sized for the worst case.
     */
-    count = eq->qsize * 10 / 8;
-    eq->map_total = eq->map_avail = count;
-    eq->map_cidx = eq->map_pidx = 0;
+    count = txq->eq.qsize * 10 / 8;
+    txq->map_total = txq->map_avail = count;
+    txq->map_cidx = txq->map_pidx = 0;
 
-    eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
+    txq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
         M_ZERO | M_WAITOK);
 
-    txm = eq->maps;
+    txm = txq->maps;
     for (i = 0; i < count; i++, txm++) {
-        rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map);
+        rc = bus_dmamap_create(txq->tx_tag, 0, &txm->map);
         if (rc != 0)
             goto failed;
     }
@@ -1746,36 +1902,36 @@
 failed:
     while (--i >= 0) {
         txm--;
-        bus_dmamap_destroy(eq->tx_tag, txm->map);
+        bus_dmamap_destroy(txq->tx_tag, txm->map);
     }
-    KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__));
+    KASSERT(txm == txq->maps, ("%s: EDOOFUS", __func__));
 
-    free(eq->maps, M_CXGBE);
-    eq->maps = NULL;
+    free(txq->maps, M_CXGBE);
+    txq->maps = NULL;
 
     return (rc);
 }
 
 static void
-free_eq_maps(struct sge_eq *eq)
+free_tx_maps(struct sge_txq *txq)
 {
     struct tx_map *txm;
     int i;
 
-    txm = eq->maps;
-    for (i = 0; i < eq->map_total; i++, txm++) {
+    txm = txq->maps;
+    for (i = 0; i < txq->map_total; i++, txm++) {
         if (txm->m) {
-            bus_dmamap_unload(eq->tx_tag, txm->map);
+            bus_dmamap_unload(txq->tx_tag, txm->map);
             m_freem(txm->m);
             txm->m = NULL;
         }
 
-        bus_dmamap_destroy(eq->tx_tag, txm->map);
+        bus_dmamap_destroy(txq->tx_tag, txm->map);
     }
 
-    free(eq->maps, M_CXGBE);
-    eq->maps = NULL;
+    free(txq->maps, M_CXGBE);
+    txq->maps = NULL;
 }
 
 /*
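Note: the DMA map ring that just moved from sge_eq into sge_txq
(map_total/map_pidx/map_cidx/map_avail) is plain producer/consumer
bookkeeping: get_pkt_sgl() takes a map at map_pidx, free_pkt_sgl() backs one
out, and reclaim_tx_descs() retires finished maps at map_cidx. A userspace
sketch of the same wraparound logic (field names mirror the driver's, but the
harness itself is hypothetical):

    #include <assert.h>

    #define MAP_TOTAL 8     /* illustrative; the driver uses qsize * 10 / 8 */

    static unsigned int map_pidx, map_cidx, map_avail = MAP_TOTAL;

    /* Take the next free map, as get_pkt_sgl() does. */
    static int
    map_take(void)
    {
        if (map_avail == 0)
            return (-1);        /* the driver bumps txq->no_dmamap here */
        map_avail--;
        if (++map_pidx == MAP_TOTAL)
            map_pidx = 0;       /* wrap */
        return (0);
    }

    /* Retire the oldest in-flight map, as reclaim_tx_descs() does. */
    static void
    map_retire(void)
    {
        map_avail++;
        if (++map_cidx == MAP_TOTAL)
            map_cidx = 0;       /* wrap */
    }

    int
    main(void)
    {
        int i;

        for (i = 0; i < 3 * MAP_TOTAL; i++) {   /* wraps several times */
            assert(map_take() == 0);
            map_retire();
        }
        assert(map_avail == MAP_TOTAL && map_pidx == map_cidx);
        return (0);
    }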
@@ -1802,7 +1958,6 @@ get_pkt_sgl(struct sge_txq *txq, struct
     int sgl_only)
 {
     struct mbuf *m = *fp;
-    struct sge_eq *eq = &txq->eq;
     struct tx_map *txm;
     int rc, defragged = 0, n;
@@ -1816,11 +1971,11 @@ start: sgl->nsegs = 0;
     if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
         return (0); /* nsegs = 0 tells caller to use imm. tx */
 
-    if (eq->map_avail == 0) {
+    if (txq->map_avail == 0) {
         txq->no_dmamap++;
         return (ENOMEM);
     }
-    txm = &eq->maps[eq->map_pidx];
+    txm = &txq->maps[txq->map_pidx];
 
     if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
         *fp = m_pullup(m, 50);
@@ -1829,7 +1984,7 @@ start: sgl->nsegs = 0;
             return (ENOBUFS);
     }
 
-    rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg,
+    rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
         &sgl->nsegs, BUS_DMA_NOWAIT);
     if (rc == EFBIG && defragged == 0) {
         m = m_defrag(m, M_DONTWAIT);
@@ -1844,9 +1999,9 @@ start: sgl->nsegs = 0;
         return (rc);
 
     txm->m = m;
-    eq->map_avail--;
-    if (++eq->map_pidx == eq->map_total)
-        eq->map_pidx = 0;
+    txq->map_avail--;
+    if (++txq->map_pidx == txq->map_total)
+        txq->map_pidx = 0;
 
     KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
         ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
@@ -1870,7 +2025,6 @@ start: sgl->nsegs = 0;
 static int
 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
 {
-    struct sge_eq *eq = &txq->eq;
     struct tx_map *txm;
 
     TXQ_LOCK_ASSERT_OWNED(txq);
@@ -1880,14 +2034,14 @@ free_pkt_sgl(struct sge_txq *txq, struct
 
     /* 1 pkt uses exactly 1 map, back it out */
 
-    eq->map_avail++;
-    if (eq->map_pidx > 0)
-        eq->map_pidx--;
+    txq->map_avail++;
+    if (txq->map_pidx > 0)
+        txq->map_pidx--;
     else
-        eq->map_pidx = eq->map_total - 1;
+        txq->map_pidx = txq->map_total - 1;
 
-    txm = &eq->maps[eq->map_pidx];
-    bus_dmamap_unload(eq->tx_tag, txm->map);
+    txm = &txq->maps[txq->map_pidx];
+    bus_dmamap_unload(txq->tx_tag, txm->map);
     txm->m = NULL;
 
     return (0);
@@ -1997,7 +2151,7 @@ write_txpkt_wr(struct port_info *pi, str
     cpl->ctrl1 = htobe64(ctrl1);
 
     /* Software descriptor */
-    txsd = &eq->sdesc[eq->pidx];
+    txsd = &txq->sdesc[eq->pidx];
     txsd->desc_used = ndesc;
 
     eq->pending += ndesc;
@@ -2009,11 +2163,11 @@ write_txpkt_wr(struct port_info *pi, str
     /* SGL */
     dst = (void *)(cpl + 1);
     if (sgl->nsegs > 0) {
-        txsd->map_used = 1;
+        txsd->credits = 1;
         txq->sgl_wrs++;
         write_sgl_to_txd(eq, sgl, &dst);
     } else {
-        txsd->map_used = 0;
+        txsd->credits = 0;
         txq->imm_wrs++;
         for (; m; m = m->m_next) {
             copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
@@ -2063,8 +2217,8 @@ add_to_txpkts(struct port_info *pi, stru
     txpkts->nflits += flits;
     txpkts->plen += m->m_pkthdr.len;
 
-    txsd = &eq->sdesc[eq->pidx];
-    txsd->map_used++;
+    txsd = &txq->sdesc[eq->pidx];
+    txsd->credits++;
 
     return (0);
 }
@@ -2098,8 +2252,8 @@ add_to_txpkts(struct port_info *pi, stru
     txpkts->flitp = &eq->desc[eq->pidx].flit[2];
     txpkts->plen = m->m_pkthdr.len;
 
-    txsd = &eq->sdesc[eq->pidx];
-    txsd->map_used = 1;
+    txsd = &txq->sdesc[eq->pidx];
+    txsd->credits = 1;
 
     return (0);
 }
@@ -2137,7 +2291,7 @@ write_txpkts_wr(struct sge_txq *txq, str
 
     /* Everything else already written */
 
-    txsd = &eq->sdesc[eq->pidx];
+    txsd = &txq->sdesc[eq->pidx];
     txsd->desc_used = ndesc;
 
     KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
@@ -2328,7 +2482,7 @@ copy_to_txd(struct sge_eq *eq, caddr_t f
 }
 
 static inline void
-ring_tx_db(struct adapter *sc, struct sge_eq *eq)
+ring_eq_db(struct adapter *sc, struct sge_eq *eq)
 {
     wmb();
     t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
@@ -2357,11 +2511,12 @@ reclaimable(struct sge_eq *eq)
 * The actual number reclaimed is provided as the return value.
 */
 static int
-reclaim_tx_descs(struct sge_eq *eq, int can_reclaim, int n)
+reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
 {
     struct tx_sdesc *txsd;
     struct tx_map *txm;
     unsigned int reclaimed, maps;
+    struct sge_eq *eq = &txq->eq;
 
     EQ_LOCK_ASSERT_OWNED(eq);
@@ -2372,7 +2527,7 @@ reclaim_tx_descs(struct sge_eq *eq, int
     while (can_reclaim && maps < n) {
         int ndesc;
 
-        txsd = &eq->sdesc[eq->cidx];
+        txsd = &txq->sdesc[eq->cidx];
         ndesc = txsd->desc_used;
 
         /* Firmware doesn't return "partial" credits. */
@@ -2380,7 +2535,7 @@ reclaim_tx_descs(struct sge_eq *eq, int
             ("%s: unexpected number of credits: %d, %d", __func__,
             can_reclaim, ndesc));
 
-        maps += txsd->map_used;
+        maps += txsd->credits;
         reclaimed += ndesc;
         can_reclaim -= ndesc;
@@ -2390,7 +2545,7 @@ reclaim_tx_descs(struct sge_eq *eq, int
             eq->cidx -= eq->cap;
     }
 
-    txm = &eq->maps[eq->map_cidx];
+    txm = &txq->maps[txq->map_cidx];
     if (maps)
         prefetch(txm->m);
@@ -2398,25 +2553,25 @@ reclaim_tx_descs(struct sge_eq *eq, int
     KASSERT(eq->avail < eq->cap,    /* avail tops out at (cap - 1) */
         ("%s: too many descriptors available", __func__));
 
-    eq->map_avail += maps;
-    KASSERT(eq->map_avail <= eq->map_total,
+    txq->map_avail += maps;
+    KASSERT(txq->map_avail <= txq->map_total,
         ("%s: too many maps available", __func__));
 
     while (maps--) {
         struct tx_map *next;
 
         next = txm + 1;
-        if (__predict_false(eq->map_cidx + 1 == eq->map_total))
-            next = eq->maps;
+        if (__predict_false(txq->map_cidx + 1 == txq->map_total))
+            next = txq->maps;
         prefetch(next->m);
 
-        bus_dmamap_unload(eq->tx_tag, txm->map);
+        bus_dmamap_unload(txq->tx_tag, txm->map);
         m_freem(txm->m);
         txm->m = NULL;
 
         txm = next;
-        if (__predict_false(++eq->map_cidx == eq->map_total))
-            eq->map_cidx = 0;
+        if (__predict_false(++txq->map_cidx == txq->map_total))
+            txq->map_cidx = 0;
     }
 
     return (reclaimed);
 }
@@ -2426,7 +2581,6 @@
 static void
 write_eqflush_wr(struct sge_eq *eq)
 {
     struct fw_eq_flush_wr *wr;
-    struct tx_sdesc *txsd;
 
     EQ_LOCK_ASSERT_OWNED(eq);
     KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
@@ -2437,10 +2591,6 @@ write_eqflush_wr(struct sge_eq *eq)
     wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
         F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
 
-    txsd = &eq->sdesc[eq->pidx];
-    txsd->desc_used = 1;
-    txsd->map_used = 0;
-
     eq->flags |= EQ_CRFLUSHED;
     eq->pending++;
     eq->avail--;
@@ -2507,3 +2657,56 @@ handle_sge_egr_update(struct adapter *sc
 
     return (0);
 }
+
+/*
+ * m0 is freed on successful transmission.
+ */
+static int
+ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
+{
+    struct sge_eq *eq = &ctrlq->eq;
+    int rc = 0, ndesc;
+    int can_reclaim;
+    caddr_t dst;
+    struct mbuf *m;
+
+    M_ASSERTPKTHDR(m0);
+
+    if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) {
+        ctrlq->too_long++;
+        return (EMSGSIZE);
+    }
+    ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE);
+
+    EQ_LOCK(eq);
+
+    can_reclaim = reclaimable(eq);
+    eq->cidx += can_reclaim;
+    eq->avail += can_reclaim;
+    if (__predict_false(eq->cidx >= eq->cap))
+        eq->cidx -= eq->cap;
+
+    if (eq->avail < ndesc) {
+        rc = EAGAIN;
+        ctrlq->no_desc++;
+        goto failed;
+    }
+
+    dst = (void *)&eq->desc[eq->pidx];
+    for (m = m0; m; m = m->m_next)
+        copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
+
+    eq->pidx += ndesc;
+    if (__predict_false(eq->pidx >= eq->cap))
+        eq->pidx -= eq->cap;
+
+    eq->pending += ndesc;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
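Note: unlike the Ethernet path, ctrl_tx() above carries no SGL; the whole WR
is copied inline into the ring, so it charges
howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE) descriptors (CTRL_EQ_ESIZE is 64,
from adapter.h) and fails with EAGAIN when that many aren't free even after
reclaiming. A userspace model of that admission arithmetic (howmany()
expanded by hand; the sizes are made up):

    #include <stdio.h>

    #define CTRL_EQ_ESIZE 64    /* descriptor size, from adapter.h */

    int
    main(void)
    {
        int avail = 5;                      /* free descriptors, made up */
        int lens[] = { 16, 64, 65, 512 };   /* WR sizes in bytes, made up */
        int i;

        for (i = 0; i < 4; i++) {
            /* howmany(len, CTRL_EQ_ESIZE): round up to whole descriptors */
            int ndesc = (lens[i] + CTRL_EQ_ESIZE - 1) / CTRL_EQ_ESIZE;

            printf("WR of %3d bytes needs %d descriptor(s): %s\n",
                lens[i], ndesc, ndesc <= avail ? "ok" : "EAGAIN");
        }
        return (0);
    }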