Date: Tue, 1 Jan 2013 18:55:05 +0000 (UTC)
From: Tim Kientzle <kientzle@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r244939 - head/sys/arm/ti/cpsw
Message-ID: <201301011855.r01It5p7019023@svn.freebsd.org>
Author: kientzle
Date: Tue Jan  1 18:55:04 2013
New Revision: 244939
URL: http://svnweb.freebsd.org/changeset/base/244939

Log:
  Overhauled CPSW driver for TI CPSW Ethernet module (as used in
  AM335x SoC for BeagleBone).  Among other things:
   * Watchdog reset doesn't hang the driver.
   * Disconnecting cable doesn't hang the driver.
   * ifconfig up/down doesn't hang the driver.
   * Out-of-memory no longer panics the driver.

  Known issues:
   * Doesn't have good support for fragmented packets (calls m_defrag()
     on TX, assumes RX packets are never fragmented)
   * Promisc and allmulti still unimplemented
   * addmulti and delmulti still unimplemented
   * TX queue still stalls (but watchdog now consistently recovers in ~5s)
   * No sysctl monitoring
   * Only supports port0
   * No switch configuration support
   * Not tested on anything but BeagleBone

  Committed from: BeagleBone

Modified:
  head/sys/arm/ti/cpsw/if_cpsw.c
  head/sys/arm/ti/cpsw/if_cpswreg.h
  head/sys/arm/ti/cpsw/if_cpswvar.h

Modified: head/sys/arm/ti/cpsw/if_cpsw.c
==============================================================================
--- head/sys/arm/ti/cpsw/if_cpsw.c      Tue Jan  1 18:54:55 2013        (r244938)
+++ head/sys/arm/ti/cpsw/if_cpsw.c      Tue Jan  1 18:55:04 2013        (r244939)
@@ -95,9 +95,9 @@ static void cpsw_start(struct ifnet *ifp
 static void cpsw_start_locked(struct ifnet *ifp);
 static void cpsw_stop_locked(struct cpsw_softc *sc);
 static int cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
-static int cpsw_allocate_dma(struct cpsw_softc *sc);
-static int cpsw_free_dma(struct cpsw_softc *sc);
-static int cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next);
+static int cpsw_init_slot_lists(struct cpsw_softc *sc);
+static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot);
+static void cpsw_fill_rx_queue_locked(struct cpsw_softc *sc);
 
 static void cpsw_watchdog(struct cpsw_softc *sc);
 static void cpsw_intr_rx_thresh(void *arg);
@@ -156,10 +156,10 @@ static struct {
        driver_intr_t *handler;
        char * description;
 } cpsw_intrs[CPSW_INTR_COUNT + 1] = {
-       { cpsw_intr_rx_thresh,"CPSW RX threshold interrupt" },
+       { cpsw_intr_rx_thresh, "CPSW RX threshold interrupt" },
        { cpsw_intr_rx, "CPSW RX interrupt" },
        { cpsw_intr_tx, "CPSW TX interrupt" },
-       { cpsw_intr_misc,"CPSW misc interrupt" },
+       { cpsw_intr_misc, "CPSW misc interrupt" },
 };
 
 /* Locking macros */
@@ -199,6 +199,34 @@
 } while (0)
 
+
+#include <machine/stdarg.h>
+static void
+cpsw_debugf_head(const char *funcname)
+{
+       int t = (int)(time_second % (24 * 60 * 60));
+
+       printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
+}
+
+static void
+cpsw_debugf(const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       vprintf(fmt, ap);
+       va_end(ap);
+       printf("\n");
+
+}
+
+#define CPSW_DEBUGF(a) do {                                    \
+       if (sc->cpsw_if_flags & IFF_DEBUG) {                    \
+               cpsw_debugf_head(__func__);                     \
+               cpsw_debugf a;                                  \
+       }                                                       \
+} while (0)
+
 static int
 cpsw_probe(device_t dev)
 {
@@ -213,18 +241,20 @@ cpsw_probe(device_t dev)
 static int
 cpsw_attach(device_t dev)
 {
-       struct cpsw_softc *sc;
+       struct cpsw_softc *sc = device_get_softc(dev);
        struct mii_softc *miisc;
        struct ifnet *ifp;
+       void *phy_sc;
        int i, error, phy;
        uint32_t reg;
 
-       sc = device_get_softc(dev);
+       CPSW_DEBUGF((""));
+
        sc->dev = dev;
        sc->node = ofw_bus_get_node(dev);
 
        /* Get phy address from fdt */
-       if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0) {
+       if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
                device_printf(dev, "failed to get PHY address from FDT\n");
                return (ENXIO);
        }
@@ -246,14 +276,32 @@ cpsw_attach(device_t dev)
        device_printf(dev, "Version %d.%d (%d)\n", (reg >> 8 & 0x7),
                reg & 0xFF, (reg >> 11) & 0x1F);
 
-       /* Allocate DMA, buffers, buffer descriptors */
-       error = cpsw_allocate_dma(sc);
+       //cpsw_add_sysctls(sc); TODO
+
+       /* Allocate a busdma tag and DMA safe memory for mbufs. */
+       error = bus_dma_tag_create(
+               bus_get_dma_tag(sc->dev),       /* parent */
+               1, 0,                           /* alignment, boundary */
+               BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
+               BUS_SPACE_MAXADDR,              /* highaddr */
+               NULL, NULL,                     /* filtfunc, filtfuncarg */
+               MCLBYTES, 1,                    /* maxsize, nsegments */
+               MCLBYTES, 0,                    /* maxsegsz, flags */
+               NULL, NULL,                     /* lockfunc, lockfuncarg */
+               &sc->mbuf_dtag);                /* dmatag */
        if (error) {
+               device_printf(dev, "bus_dma_tag_create failed\n");
                cpsw_detach(dev);
-               return (ENXIO);
+               return (ENOMEM);
        }
 
-       //cpsw_add_sysctls(sc); TODO
+       /* Initialize the tx_avail and rx_avail lists. */
+       error = cpsw_init_slot_lists(sc);
+       if (error) {
+               device_printf(dev, "failed to allocate dmamaps\n");
+               cpsw_detach(dev);
+               return (ENOMEM);
+       }
 
        /* Allocate network interface */
        ifp = sc->ifp = if_alloc(IFT_ETHER);
@@ -294,7 +342,7 @@ cpsw_attach(device_t dev)
 
        /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
        /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
-       cpsw_write_4(MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
+       cpsw_write_4(MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);
 
        /* Attach PHY(s) */
        error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
@@ -310,7 +358,7 @@ cpsw_attach(device_t dev)
        miisc = LIST_FIRST(&sc->mii->mii_phys);
 
        /* Select PHY and enable interrupts */
-       cpsw_write_4(MDIOUSERPHYSEL0, (1 << 6) | (miisc->mii_phy & 0x1F));
+       cpsw_write_4(MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));
 
        /* Attach interrupt handlers */
        for (i = 1; i <= CPSW_INTR_COUNT; ++i) {
@@ -332,17 +380,22 @@ cpsw_attach(device_t dev)
 static int
 cpsw_detach(device_t dev)
 {
-       struct cpsw_softc *sc;
-       int error,i;
+       struct cpsw_softc *sc = device_get_softc(dev);
+       int error, i;
 
-       sc = device_get_softc(dev);
+       CPSW_DEBUGF((""));
 
        /* Stop controller and free TX queue */
-       if (sc->ifp)
-               cpsw_shutdown(dev);
+       if (device_is_attached(dev)) {
+               ether_ifdetach(sc->ifp);
+               CPSW_GLOBAL_LOCK(sc);
+               cpsw_stop_locked(sc);
+               CPSW_GLOBAL_UNLOCK(sc);
+               callout_drain(&sc->wd_callout);
+       }
 
-       /* Wait for stopping ticks */
-       callout_drain(&sc->wd_callout);
+       bus_generic_detach(dev);
+       device_delete_child(dev, sc->miibus);
 
        /* Stop and release all interrupts */
        for (i = 0; i < CPSW_INTR_COUNT; ++i) {
@@ -355,14 +408,17 @@ cpsw_detach(device_t dev)
                    cpsw_intrs[i + 1].description);
        }
 
-       /* Detach network interface */
-       if (sc->ifp) {
-               ether_ifdetach(sc->ifp);
-               if_free(sc->ifp);
+       /* Free dmamaps and mbufs */
+       for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
+               cpsw_free_slot(sc, &sc->_tx_slots[i]);
+       }
+       for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
+               cpsw_free_slot(sc, &sc->_rx_slots[i]);
        }
 
-       /* Free DMA resources */
-       cpsw_free_dma(sc);
+       /* Free DMA tag */
+       error = bus_dma_tag_destroy(sc->mbuf_dtag);
+       KASSERT(error == 0, ("Unable to destroy DMA tag"));
 
        /* Free IO memory handler */
        bus_release_resources(dev, res_spec, sc->res);
@@ -377,15 +433,19 @@ cpsw_detach(device_t dev)
 static int
 cpsw_suspend(device_t dev)
 {
+       struct cpsw_softc *sc = device_get_softc(dev);
 
-       device_printf(dev, "%s\n", __FUNCTION__);
+       CPSW_DEBUGF((""));
+       CPSW_GLOBAL_LOCK(sc);
+       cpsw_stop_locked(sc);
+       CPSW_GLOBAL_UNLOCK(sc);
        return (0);
 }
 
 static int
 cpsw_resume(device_t dev)
 {
-
+       /* XXX TODO XXX */
        device_printf(dev, "%s\n", __FUNCTION__);
        return (0);
 }
@@ -395,208 +455,132 @@ cpsw_shutdown(device_t dev)
 {
        struct cpsw_softc *sc = device_get_softc(dev);
 
+       CPSW_DEBUGF((""));
        CPSW_GLOBAL_LOCK(sc);
-
        cpsw_stop_locked(sc);
-
        CPSW_GLOBAL_UNLOCK(sc);
-
        return (0);
 }
 
 static int
-cpsw_miibus_readreg(device_t dev, int phy, int reg)
+cpsw_miibus_ready(struct cpsw_softc *sc)
 {
-       struct cpsw_softc *sc;
-       uint32_t r;
-       uint32_t retries = CPSW_MIIBUS_RETRIES;
+       uint32_t r, retries = CPSW_MIIBUS_RETRIES;
 
-       sc = device_get_softc(dev);
-
-       /* Wait until interface is ready by watching GO bit */
-       while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
+       while (--retries) {
+               r = cpsw_read_4(MDIOUSERACCESS0);
+               if ((r & 1 << 31) == 0)
+                       return 1;
                DELAY(CPSW_MIIBUS_DELAY);
-       if (!retries)
-               device_printf(dev, "Timeout while waiting for MDIO.\n");
+       }
+       return 0;
+}
 
-       /* Set GO, phy and reg */
-       cpsw_write_4(MDIOUSERACCESS0, (1 << 31) |
-               ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
+static int
+cpsw_miibus_readreg(device_t dev, int phy, int reg)
+{
+       struct cpsw_softc *sc = device_get_softc(dev);
+       uint32_t cmd, r;
 
-       while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
-               DELAY(CPSW_MIIBUS_DELAY);
-       if (!retries)
-               device_printf(dev, "Timeout while waiting for MDIO.\n");
+       if (!cpsw_miibus_ready(sc)) {
+               device_printf(dev, "MDIO not ready to read\n");
+               return 0;
+       }
+
+       /* Set GO, reg, phy */
+       cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
+       cpsw_write_4(MDIOUSERACCESS0, cmd);
+
+       if (!cpsw_miibus_ready(sc)) {
+               device_printf(dev, "MDIO timed out during read\n");
+               return 0;
+       }
 
        r = cpsw_read_4(MDIOUSERACCESS0);
-       /* Check for ACK */
-       if(r & (1<<29)) {
-               return (r & 0xFFFF);
+       if((r & 1 << 29) == 0) {
+               device_printf(dev, "Failed to read from PHY.\n");
+               r = 0;
        }
-       device_printf(dev, "Failed to read from PHY.\n");
-       return 0;
+       return (r & 0xFFFF);
 }
 
 static int
 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
 {
-       struct cpsw_softc *sc;
-       uint32_t retries = CPSW_MIIBUS_RETRIES;
-
-       sc = device_get_softc(dev);
-
-       /* Wait until interface is ready by watching GO bit */
-       while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
-               DELAY(CPSW_MIIBUS_DELAY);
-       if (!retries)
-               device_printf(dev, "Timeout while waiting for MDIO.\n");
+       struct cpsw_softc *sc = device_get_softc(dev);
+       uint32_t cmd;
 
-       /* Set GO, WRITE, phy, reg and value */
-       cpsw_write_4(MDIOUSERACCESS0, (value & 0xFFFF) | (3 << 30) |
-               ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
+       if (!cpsw_miibus_ready(sc)) {
+               device_printf(dev, "MDIO not ready to write\n");
+               return 0;
+       }
 
-       while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
-               DELAY(CPSW_MIIBUS_DELAY);
-       if (!retries)
-               device_printf(dev, "Timeout while waiting for MDIO.\n");
+       /* Set GO, WRITE, reg, phy, and value */
+       cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
+           | (value & 0xFFFF);
+       cpsw_write_4(MDIOUSERACCESS0, cmd);
 
-       /* Check for ACK */
-       if(cpsw_read_4(MDIOUSERACCESS0) & (1<<29)) {
+       if (!cpsw_miibus_ready(sc)) {
+               device_printf(dev, "MDIO timed out during write\n");
                return 0;
        }
-       device_printf(dev, "Failed to write to PHY.\n");
+
+       if((cpsw_read_4(MDIOUSERACCESS0) & (1 << 29)) == 0)
+               device_printf(dev, "Failed to write to PHY.\n");
        return 0;
 }
 
 static int
-cpsw_allocate_dma(struct cpsw_softc *sc)
+cpsw_init_slot_lists(struct cpsw_softc *sc)
 {
-       int err;
        int i;
 
-       /* Allocate a busdma tag and DMA safe memory for tx mbufs. */
-       err = bus_dma_tag_create(
-               bus_get_dma_tag(sc->dev),       /* parent */
-               1, 0,                           /* alignment, boundary */
-               BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
-               BUS_SPACE_MAXADDR,              /* highaddr */
-               NULL, NULL,                     /* filtfunc, filtfuncarg */
-               MCLBYTES, 1,                    /* maxsize, nsegments */
-               MCLBYTES, 0,                    /* maxsegsz, flags */
-               NULL, NULL,                     /* lockfunc, lockfuncarg */
-               &sc->mbuf_dtag);                /* dmatag */
+       STAILQ_INIT(&sc->rx_active);
+       STAILQ_INIT(&sc->rx_avail);
+       STAILQ_INIT(&sc->tx_active);
+       STAILQ_INIT(&sc->tx_avail);
 
-       if (err)
-               return (ENOMEM);
+       /* Put the slot descriptors onto the avail lists. */
        for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
-               if ( bus_dmamap_create(sc->mbuf_dtag, 0, &sc->tx_dmamap[i])) {
-                       if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n");
+               struct cpsw_slot *slot = &sc->_tx_slots[i];
+               slot->index = i;
+               /* XXX TODO: Remove this from here; allocate dmamaps lazily
+                  in the encap routine to reduce memory usage. */
+               if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
+                       if_printf(sc->ifp, "failed to create dmamap for tx mbuf\n");
                        return (ENOMEM);
                }
+               STAILQ_INSERT_TAIL(&sc->tx_avail, slot, next);
        }
 
        for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
-               if ( bus_dmamap_create(sc->mbuf_dtag, 0, &sc->rx_dmamap[i])) {
+               struct cpsw_slot *slot = &sc->_rx_slots[i];
+               slot->index = i;
+               if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
                        if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n");
                        return (ENOMEM);
                }
+               STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next);
        }
 
        return (0);
 }
 
-static int
-cpsw_free_dma(struct cpsw_softc *sc)
-{
-       (void)sc; /* UNUSED */
-       // TODO
-       return 0;
-}
-
-static int
-cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next)
-{
-       bus_dma_segment_t seg[1];
-       struct cpsw_cpdma_bd bd;
-       int error;
-       int nsegs;
-
-       sc->rx_mbuf[i] = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
-       if (sc->rx_mbuf[i] == NULL)
-               return (ENOBUFS);
-
-       sc->rx_mbuf[i]->m_len = sc->rx_mbuf[i]->m_pkthdr.len = sc->rx_mbuf[i]->m_ext.ext_size;
-
-       error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->rx_dmamap[i],
-               sc->rx_mbuf[i], seg, &nsegs, BUS_DMA_NOWAIT);
-
-       KASSERT(nsegs == 1, ("Too many segments returned!"));
-       if (nsegs != 1 || error)
-               panic("%s: nsegs(%d), error(%d)",__func__, nsegs, error);
-
-       bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_PREREAD);
-
-       /* Create and submit new rx descriptor*/
-       bd.next = next;
-       bd.bufptr = seg->ds_addr;
-       bd.buflen = MCLBYTES-1;
-       bd.bufoff = 2; /* make IP hdr aligned with 4 */
-       bd.pktlen = 0;
-       bd.flags = CPDMA_BD_OWNER;
-       cpsw_cpdma_write_rxbd(i, &bd);
-
-       return (0);
-}
-
-
-static int
-cpsw_encap(struct cpsw_softc *sc, struct mbuf *m0)
+static void
+cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
 {
-       bus_dma_segment_t seg[1];
-       struct cpsw_cpdma_bd bd;
        int error;
-       int nsegs;
-       int idx;
 
-       if (sc->txbd_queue_size == CPSW_MAX_TX_BUFFERS)
-               return (ENOBUFS);
-
-       idx = sc->txbd_head + sc->txbd_queue_size;
-
-       if (idx >= (CPSW_MAX_TX_BUFFERS) )
-               idx -= CPSW_MAX_TX_BUFFERS;
-
-       /* Create mapping in DMA memory */
-       error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->tx_dmamap[idx], m0, seg, &nsegs,
-               BUS_DMA_NOWAIT);
-       sc->tc[idx]++;
-       if (error != 0 || nsegs != 1 ) {
-               bus_dmamap_unload(sc->mbuf_dtag, sc->tx_dmamap[idx]);
-               return ((error != 0) ? error : -1);
-       }
-       bus_dmamap_sync(sc->mbuf_dtag, sc->tx_dmamap[idx], BUS_DMASYNC_PREWRITE);
-
-       /* Fill descriptor data */
-       bd.next = 0;
-       bd.bufptr = seg->ds_addr;
-       bd.bufoff = 0;
-       bd.buflen = seg->ds_len;
-       bd.pktlen = seg->ds_len;
-       /* Set OWNERSHIP, SOP, EOP */
-       bd.flags = (7<<13);
-
-       /* Write descriptor */
-       cpsw_cpdma_write_txbd(idx, &bd);
-       sc->tx_mbuf[idx] = m0;
-
-       /* Previous descriptor should point to us */
-       cpsw_cpdma_write_txbd_next(((idx-1<0)?(CPSW_MAX_TX_BUFFERS-1):(idx-1)),
-               cpsw_cpdma_txbd_paddr(idx));
-
-       sc->txbd_queue_size++;
-
-       return (0);
+       if (slot->dmamap) {
+               error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
+               KASSERT(error == 0, ("Mapping still active"));
+               slot->dmamap = NULL;
+       }
+       if (slot->mbuf) {
+               m_freem(slot->mbuf);
+               slot->mbuf = NULL;
+       }
 }
 
 /*
@@ -655,10 +639,13 @@ cpsw_start(struct ifnet *ifp)
 static void
 cpsw_start_locked(struct ifnet *ifp)
 {
+       bus_dma_segment_t seg[1];
+       struct cpsw_cpdma_bd bd;
        struct cpsw_softc *sc = ifp->if_softc;
+       struct cpsw_queue newslots = STAILQ_HEAD_INITIALIZER(newslots);
+       struct cpsw_slot *slot, *prev_slot = NULL, *first_new_slot;
        struct mbuf *m0, *mtmp;
-       uint32_t queued = 0;
-       int error;
+       int error, nsegs, enqueued = 0;
 
        CPSW_TX_LOCK_ASSERT(sc);
 
@@ -666,44 +653,111 @@ cpsw_start_locked(struct ifnet *ifp)
            IFF_DRV_RUNNING)
                return;
 
+       /* Pull pending packets from IF queue and prep them for DMA. */
        for (;;) {
-               /* Get packet from the queue */
+               slot = STAILQ_FIRST(&sc->tx_avail);
+               if (slot == NULL) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+
                IF_DEQUEUE(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
 
                if ((error = cpsw_pad(m0))) {
+                       if_printf(ifp,
+                           "%s: Dropping packet; could not pad\n", __func__);
                        m_freem(m0);
                        continue;
                }
 
+               /* TODO: don't defragment here, queue each
+                  packet fragment as a separate entry. */
                mtmp = m_defrag(m0, M_NOWAIT);
                if (mtmp)
                        m0 = mtmp;
 
-               if (cpsw_encap(sc, m0)) {
-                       IF_PREPEND(&ifp->if_snd, m0);
-                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+               slot->mbuf = m0;
+               /* Create mapping in DMA memory */
+               error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
+                   m0, seg, &nsegs, BUS_DMA_NOWAIT);
+               KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
+               KASSERT(error == 0, ("DMA error (error=%d)", error));
+               if (error != 0 || nsegs != 1) {
+                       if_printf(ifp,
+                           "%s: Can't load packet for DMA (nsegs=%d, error=%d), dropping packet\n",
+                           __func__, nsegs, error);
+                       bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
+                       m_freem(m0);
                        break;
                }
-               queued++;
+               bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
+                   BUS_DMASYNC_PREWRITE);
+
+               if (prev_slot != NULL)
+                       cpsw_cpdma_write_txbd_next(prev_slot->index,
+                           cpsw_cpdma_txbd_paddr(slot->index));
+               bd.next = 0;
+               bd.bufptr = seg->ds_addr;
+               bd.bufoff = 0;
+               bd.buflen = seg->ds_len;
+               bd.pktlen = seg->ds_len;
+               bd.flags = 7 << 13;     /* Set OWNERSHIP, SOP, EOP */
+               cpsw_cpdma_write_txbd(slot->index, &bd);
+               ++enqueued;
+
+               prev_slot = slot;
+               STAILQ_REMOVE_HEAD(&sc->tx_avail, next);
+               STAILQ_INSERT_TAIL(&newslots, slot, next);
                BPF_MTAP(ifp, m0);
        }
 
-       if (!queued)
+       if (STAILQ_EMPTY(&newslots))
                return;
 
-       if (sc->eoq) {
-               cpsw_write_4(CPSW_CPDMA_TX_HDP(0), cpsw_cpdma_txbd_paddr(sc->txbd_head));
-               sc->eoq = 0;
+       /* Attach new segments to the hardware TX queue. */
+       prev_slot = STAILQ_LAST(&sc->tx_active, cpsw_slot, next);
+       first_new_slot = STAILQ_FIRST(&newslots);
+       STAILQ_CONCAT(&sc->tx_active, &newslots);
+       if (prev_slot == NULL) {
+               /* Start the TX queue fresh. */
+               cpsw_write_4(CPSW_CPDMA_TX_HDP(0),
+                   cpsw_cpdma_txbd_paddr(first_new_slot->index));
+       } else {
+               /* Add packets to current queue. */
+               /* Race: The hardware might have sent the last packet
+                * on the queue and stopped the transmitter just
+                * before we got here.  In that case, this is a no-op,
+                * but it also means there's a TX interrupt waiting
+                * to be processed as soon as we release the lock here.
+                * That TX interrupt can detect and recover from this
+                * situation; see cpsw_intr_tx_locked.
+                */
+               cpsw_cpdma_write_txbd_next(prev_slot->index,
+                   cpsw_cpdma_txbd_paddr(first_new_slot->index));
+       }
+
+       /* If tx_retires hasn't changed, then we may have
+          lost a TX interrupt, so let the timer tick. */
+       sc->tx_enqueues += enqueued;
+       if (sc->tx_retires_at_wd_reset != sc->tx_retires) {
+               sc->tx_retires_at_wd_reset = sc->tx_retires;
+               sc->wd_timer = 5;
+       }
+       sc->tx_queued += enqueued;
+       if (sc->tx_queued > sc->tx_max_queued) {
+               sc->tx_max_queued = sc->tx_queued;
+               CPSW_DEBUGF(("New TX high water mark %d", sc->tx_queued));
        }
-       sc->wd_timer = 5;
 }
 
 static void
 cpsw_stop_locked(struct cpsw_softc *sc)
 {
        struct ifnet *ifp;
+       int i;
+
+       CPSW_DEBUGF((""));
 
        CPSW_GLOBAL_LOCK_ASSERT(sc);
 
@@ -712,15 +766,89 @@ cpsw_stop_locked(struct cpsw_softc *sc)
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;
 
-       /* Stop tick engine */
-       callout_stop(&sc->wd_callout);
-
        /* Disable interface */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       /* Stop tick engine */
+       callout_stop(&sc->wd_callout);
        sc->wd_timer = 0;
 
-       /* Disable interrupts TODO */
+       /* Wait for hardware to clear pending ops. */
+       CPSW_GLOBAL_UNLOCK(sc);
+       CPSW_DEBUGF(("starting RX and TX teardown"));
+       cpsw_write_4(CPSW_CPDMA_RX_TEARDOWN, 0);
+       cpsw_write_4(CPSW_CPDMA_TX_TEARDOWN, 0);
+       i = 0;
+       cpsw_intr_rx(sc); // Try clearing without delay.
+       cpsw_intr_tx(sc);
+       while (sc->rx_running || sc->tx_running) {
+               DELAY(10);
+               cpsw_intr_rx(sc);
+               cpsw_intr_tx(sc);
+               ++i;
+       }
+       CPSW_DEBUGF(("finished RX and TX teardown (%d tries)", i));
+       CPSW_GLOBAL_LOCK(sc);
+
+       /* All slots are now available */
+       STAILQ_CONCAT(&sc->rx_avail, &sc->rx_active);
+       STAILQ_CONCAT(&sc->tx_avail, &sc->tx_active);
+       CPSW_DEBUGF(("%d buffers dropped at TX reset", sc->tx_queued));
+       sc->tx_queued = 0;
+
+       /* Reset writer */
+       cpsw_write_4(CPSW_WR_SOFT_RESET, 1);
+       while (cpsw_read_4(CPSW_WR_SOFT_RESET) & 1)
+               ;
+
+       /* Reset SS */
+       cpsw_write_4(CPSW_SS_SOFT_RESET, 1);
+       while (cpsw_read_4(CPSW_SS_SOFT_RESET) & 1)
+               ;
+
+       /* Reset Sliver port 1 and 2 */
+       for (i = 0; i < 2; i++) {
+               /* Reset */
+               cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1);
+               while (cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1)
+                       ;
+       }
+
+       /* Reset CPDMA */
+       cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1);
+       while (cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1)
+               ;
+
+       /* Disable TX & RX DMA */
+       cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 0);
+       cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 0);
+
+       /* Disable TX and RX interrupts for all cores. */
+       for (i = 0; i < 3; ++i) {
+               cpsw_write_4(CPSW_WR_C_TX_EN(i), 0x00);
+               cpsw_write_4(CPSW_WR_C_RX_EN(i), 0x00);
+               cpsw_write_4(CPSW_WR_C_MISC_EN(i), 0x00);
+       }
+
+       /* Clear all interrupt Masks */
+       cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
+       cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
+}
+
+static void
+cpsw_set_promisc(struct cpsw_softc *sc, int set)
+{
+       if (set) {
+               printf("Promiscuous mode unimplemented\n");
+       }
+}
+static void
+cpsw_set_allmulti(struct cpsw_softc *sc, int set)
+{
+       if (set) {
+               printf("All-multicast mode unimplemented\n");
+       }
 }
 
 static int
@@ -729,53 +857,48 @@ cpsw_ioctl(struct ifnet *ifp, u_long com
        struct cpsw_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int error;
-       uint32_t flags;
+       uint32_t changed;
 
+       CPSW_DEBUGF(("command=0x%lx", command));
        error = 0;
 
-       // FIXME
        switch (command) {
        case SIOCSIFFLAGS:
                CPSW_GLOBAL_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-                               flags = ifp->if_flags ^ sc->cpsw_if_flags;
-                               if (flags & IFF_PROMISC)
-                                       printf("%s: SIOCSIFFLAGS "
-                                           "IFF_PROMISC unimplemented\n",
-                                           __func__);
-
-                               if (flags & IFF_ALLMULTI)
-                                       printf("%s: SIOCSIFFLAGS "
-                                           "IFF_ALLMULTI unimplemented\n",
-                                           __func__);
+                               changed = ifp->if_flags ^ sc->cpsw_if_flags;
+                               CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
+                               if (changed & IFF_PROMISC)
+                                       cpsw_set_promisc(sc,
+                                           ifp->if_flags & IFF_PROMISC);
+                               if (changed & IFF_ALLMULTI)
+                                       cpsw_set_allmulti(sc,
+                                           ifp->if_flags & IFF_ALLMULTI);
                        } else {
-                               printf("%s: SIOCSIFFLAGS cpsw_init_locked\n", __func__);
-                               //cpsw_init_locked(sc);
+                               CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING"));
+                               cpsw_init_locked(sc);
                        }
-               }
-               else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+               } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING"));
                        cpsw_stop_locked(sc);
+               }
 
                sc->cpsw_if_flags = ifp->if_flags;
                CPSW_GLOBAL_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
-               printf("%s: SIOCADDMULTI\n",__func__);
+               CPSW_DEBUGF(("SIOCADDMULTI unimplemented"));
                break;
        case SIOCDELMULTI:
-               printf("%s: SIOCDELMULTI\n",__func__);
-               break;
-       case SIOCSIFCAP:
-               printf("%s: SIOCSIFCAP\n",__func__);
+               CPSW_DEBUGF(("SIOCDELMULTI unimplemented"));
                break;
        case SIOCGIFMEDIA:
-               error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
-               break;
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
                break;
        default:
+               CPSW_DEBUGF(("ether ioctl"));
                error = ether_ioctl(ifp, command, data);
        }
        return (error);
@@ -787,6 +910,7 @@ cpsw_ifmedia_sts(struct ifnet *ifp, stru
        struct cpsw_softc *sc = ifp->if_softc;
        struct mii_data *mii;
 
+       CPSW_DEBUGF((""));
        CPSW_TX_LOCK(sc);
 
        mii = sc->mii;
@@ -804,13 +928,12 @@ cpsw_ifmedia_upd(struct ifnet *ifp)
 {
        struct cpsw_softc *sc = ifp->if_softc;
 
+       CPSW_DEBUGF((""));
        if (ifp->if_flags & IFF_UP) {
                CPSW_GLOBAL_LOCK(sc);
-               sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
                mii_mediachg(sc->mii);
                cpsw_init_locked(sc);
-
                CPSW_GLOBAL_UNLOCK(sc);
        }
 
@@ -820,15 +943,18 @@ cpsw_ifmedia_upd(struct ifnet *ifp)
 static void
 cpsw_intr_rx_thresh(void *arg)
 {
-       (void)arg; /* UNUSED */
+       struct cpsw_softc *sc = arg;
+       CPSW_DEBUGF((""));
 }
 
 static void
 cpsw_intr_rx(void *arg)
 {
        struct cpsw_softc *sc = arg;
+
        CPSW_RX_LOCK(sc);
        cpsw_intr_rx_locked(arg);
+       cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
        CPSW_RX_UNLOCK(sc);
 }
 
@@ -837,63 +963,155 @@ cpsw_intr_rx_locked(void *arg)
 {
        struct cpsw_softc *sc = arg;
        struct cpsw_cpdma_bd bd;
+       struct cpsw_slot *slot, *last_slot = NULL;
        struct ifnet *ifp;
-       int i;
 
        ifp = sc->ifp;
+       if (!sc->rx_running)
+               return;
 
-       i = sc->rxbd_head;
-       cpsw_cpdma_read_rxbd(i, &bd);
-
-       while (bd.flags & CPDMA_BD_SOP) {
-               cpsw_write_4(CPSW_CPDMA_RX_CP(0), cpsw_cpdma_rxbd_paddr(i));
+       /* Pull completed packets off hardware RX queue. */
+       slot = STAILQ_FIRST(&sc->rx_active);
+       while (slot != NULL) {
+               cpsw_cpdma_read_rxbd(slot->index, &bd);
+               if (bd.flags & CPDMA_BD_OWNER)
+                       break; /* Still in use by hardware */
+
+               if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
+                       CPSW_DEBUGF(("RX teardown in progress"));
+                       cpsw_write_4(CPSW_CPDMA_RX_CP(0), 0xfffffffc);
+                       sc->rx_running = 0;
+                       return;
+               }
 
-               bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_POSTREAD);
-               bus_dmamap_unload(sc->mbuf_dtag, sc->rx_dmamap[i]);
+               bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
+               bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
 
                /* Fill mbuf */
-               sc->rx_mbuf[i]->m_hdr.mh_data += bd.bufoff;
-               sc->rx_mbuf[i]->m_hdr.mh_len = bd.pktlen - 4;
-               sc->rx_mbuf[i]->m_pkthdr.len = bd.pktlen - 4;
-               sc->rx_mbuf[i]->m_flags |= M_PKTHDR;
-               sc->rx_mbuf[i]->m_pkthdr.rcvif = ifp;
+               /* TODO: track SOP/EOP bits to assemble a full mbuf
+                  out of received fragments. */
+               slot->mbuf->m_hdr.mh_data += bd.bufoff;
+               slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
+               slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
+               slot->mbuf->m_flags |= M_PKTHDR;
+               slot->mbuf->m_pkthdr.rcvif = ifp;
 
                if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                        /* check for valid CRC by looking into pkt_err[5:4] */
-                       if ( (bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0 ) {
-                               sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
-                               sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-                               sc->rx_mbuf[i]->m_pkthdr.csum_data = 0xffff;
+                       if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
+                               slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+                               slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+                               slot->mbuf->m_pkthdr.csum_data = 0xffff;
                        }
                }
 
                /* Handover packet */
                CPSW_RX_UNLOCK(sc);
-               (*ifp->if_input)(ifp, sc->rx_mbuf[i]);
-               sc->rx_mbuf[i] = NULL;
+               (*ifp->if_input)(ifp, slot->mbuf);
+               slot->mbuf = NULL;
                CPSW_RX_LOCK(sc);
 
-               /* Allocate new buffer for current descriptor */
-               cpsw_new_rxbuf(sc, i, 0);
+               last_slot = slot;
+               STAILQ_REMOVE_HEAD(&sc->rx_active, next);
+               STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next);
+               slot = STAILQ_FIRST(&sc->rx_active);
+       }
+
+       /* Tell hardware last slot we processed. */
+       if (last_slot)
+               cpsw_write_4(CPSW_CPDMA_RX_CP(0),
+                   cpsw_cpdma_rxbd_paddr(last_slot->index));
 
-               /* we are not at tail so old tail BD should point to new one */
-               cpsw_cpdma_write_rxbd_next(sc->rxbd_tail,
-                   cpsw_cpdma_rxbd_paddr(i));
-
-               /* Check if EOQ is reached */
-               if (cpsw_cpdma_read_rxbd_flags(sc->rxbd_tail) & CPDMA_BD_EOQ) {
-                       cpsw_write_4(CPSW_CPDMA_RX_HDP(0), cpsw_cpdma_rxbd_paddr(i));
+       /* Repopulate hardware RX queue. */
+       cpsw_fill_rx_queue_locked(sc);
+}
+
+static void
+cpsw_fill_rx_queue_locked(struct cpsw_softc *sc)
+{
+       bus_dma_segment_t seg[1];
+       struct cpsw_queue tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
+       struct cpsw_cpdma_bd bd;
+       struct cpsw_slot *slot, *prev_slot, *next_slot;
+       int error, nsegs;
+
+       /* Try to allocate new mbufs. */
+       STAILQ_FOREACH(slot, &sc->rx_avail, next) {
+               if (slot->mbuf != NULL)
+                       continue;
+               slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+               if (slot->mbuf == NULL) {
+                       if_printf(sc->ifp, "Unable to fill RX queue\n");
+                       break;
                }
-               sc->rxbd_tail = i;
+               slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size;
+       }
 
-               /* read next descriptor */
-               if (++i == CPSW_MAX_RX_BUFFERS)
-                       i = 0;
-               cpsw_cpdma_read_rxbd(i, &bd);
-               sc->rxbd_head = i;
+       /* Register new mbufs with hardware. */
+       prev_slot = NULL;
+       while (!STAILQ_EMPTY(&sc->rx_avail)) {
+               slot = STAILQ_FIRST(&sc->rx_avail);
+               if (slot->mbuf == NULL)
+                       break;
+
+               error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
+                   slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
+
+               KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
+               KASSERT(error == 0, ("DMA error (error=%d)", error));
+               if (nsegs != 1 || error) {
+                       if_printf(sc->ifp,
+                           "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
+                           __func__, nsegs, error);
+                       m_freem(slot->mbuf);
+                       slot->mbuf = NULL;
+                       break;
+               }
+
+               bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
 
-               /* Create and submit new rx descriptor*/
+               bd.next = 0;
+               bd.bufptr = seg->ds_addr;
+               bd.buflen = MCLBYTES-1;
+               bd.bufoff = 2; /* make IP hdr aligned with 4 */
+               bd.pktlen = 0;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
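
The recurring pattern in this overhaul is the pair of avail/active slot lists
built on the STAILQ macros from <sys/queue.h>: each slot (DMA map, mbuf, and
descriptor index) moves from the avail list to the active list when its
descriptor is handed to the CPDMA engine, moves back when the hardware
completes it, and the entire active list is dumped back onto avail in a single
STAILQ_CONCAT at teardown.  The sketch below is an illustration only, not
driver code: it compiles standalone in userspace, fakes the DMA payload with a
bare index, and borrows the names (slot, avail, active) from the driver purely
for readability.

/*
 * Illustrative sketch of the avail/active slot-queue bookkeeping.
 * Assumes a BSD-style <sys/queue.h> providing the STAILQ macros
 * (including STAILQ_CONCAT, as on FreeBSD).
 */
#include <sys/queue.h>
#include <stdio.h>

struct slot {
        int index;                      /* stands in for dmamap + mbuf */
        STAILQ_ENTRY(slot) next;
};
STAILQ_HEAD(slot_queue, slot);

int
main(void)
{
        struct slot_queue avail = STAILQ_HEAD_INITIALIZER(avail);
        struct slot_queue active = STAILQ_HEAD_INITIALIZER(active);
        struct slot slots[4];
        struct slot *s;
        int i;

        /* Attach time: every slot starts on the avail list. */
        for (i = 0; i < 4; i++) {
                slots[i].index = i;
                STAILQ_INSERT_TAIL(&avail, &slots[i], next);
        }

        /* Enqueue: take a slot from avail, give it to "hardware". */
        while ((s = STAILQ_FIRST(&avail)) != NULL) {
                STAILQ_REMOVE_HEAD(&avail, next);
                STAILQ_INSERT_TAIL(&active, s, next);
        }

        /* Completion: retire finished slots back onto avail. */
        while ((s = STAILQ_FIRST(&active)) != NULL) {
                STAILQ_REMOVE_HEAD(&active, next);
                printf("retired slot %d\n", s->index);
                STAILQ_INSERT_TAIL(&avail, s, next);
        }

        /* Teardown: whatever is still active goes back in one shot. */
        STAILQ_CONCAT(&avail, &active);
        return (0);
}

The attraction of the design is that every transition is O(1) and each slot is
always on exactly one of the two lists, so the reset path can reclaim all
outstanding buffers without walking descriptor indices.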