"DMA tag (error %d)\n", error); + goto fail; + } + + error = bus_dma_tag_create(sc->sc_dmat, + 1, /* alignment */ + 0, /* boundary */ + BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, + NULL, NULL, /* filter (unused) */ + MCLBYTES, /* maxsize */ + 1, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_dmat_rx_buf); + if (error) { + RGE_PRINT_ERROR(sc, + "couldn't allocate device RX buffer DMA tag (error %d)\n", + error); + goto fail; + } + + error = bus_dma_tag_create(sc->sc_dmat, + RGE_STATS_ALIGNMENT, /* alignment */ + 0, /* boundary */ + BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, + NULL, NULL, /* filter (unused) */ + RGE_STATS_BUF_SIZE, /* maxsize */ + 1, /* nsegments */ + RGE_STATS_BUF_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_dmat_stats_buf); + if (error) { + RGE_PRINT_ERROR(sc, + "couldn't allocate device RX buffer DMA tag (error %d)\n", + error); + goto fail; + } + + + /* Attach sysctl nodes */ + rge_sysctl_attach(sc); + + /* Determine hardware revision */ + hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV; + switch (hwrev) { + case 0x60900000: + sc->rge_type = MAC_R25; +// device_printf(dev, "RTL8125\n"); + break; + case 0x64100000: + sc->rge_type = MAC_R25B; +// device_printf(dev, "RTL8125B\n"); + break; + case 0x64900000: + sc->rge_type = MAC_R26; +// device_printf(dev, "RTL8126\n"); + break; + case 0x68800000: + sc->rge_type = MAC_R25D; +// device_printf(dev, "RTL8125D\n"); + break; + case 0x6c900000: + sc->rge_type = MAC_R27; +// device_printf(dev, "RTL8127\n"); + break; + default: + RGE_PRINT_ERROR(sc, "unknown version 0x%08x\n", hwrev); + goto fail; + } + + rge_config_imtype(sc, RGE_IMTYPE_SIM); + + /* TODO: disable ASPM/ECPM? */ + +#if 0 + /* + * PCI Express check. + */ + if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, + &offset, NULL)) { + /* Disable PCIe ASPM and ECPM. */ + reg = pci_conf_read(pa->pa_pc, pa->pa_tag, + offset + PCI_PCIE_LCSR); + reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 | + PCI_PCIE_LCSR_ECPM); + pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR, + reg); + } +#endif + + RGE_LOCK(sc); + if (rge_chipinit(sc)) { + RGE_UNLOCK(sc); + goto fail; + } + + rge_get_macaddr(sc, eaddr); + RGE_UNLOCK(sc); + + if (rge_allocmem(sc)) + goto fail; + if (rge_alloc_stats_mem(sc)) + goto fail; + + /* Initialize ifmedia structures. */ + ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd, + rge_ifmedia_sts); + rge_add_media_types(sc); + ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); + sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media; + + rge_attach_if(sc, eaddr); + + /* + * TODO: technically should be per txq but we only support + * one TXQ at the moment. + */ + mbufq_init(&sc->sc_txq, RGE_TX_LIST_CNT); + + snprintf(sc->sc_tq_name, sizeof(sc->sc_tq_name), + "%s taskq", device_get_nameunit(sc->sc_dev)); + snprintf(sc->sc_tq_thr_name, sizeof(sc->sc_tq_thr_name), + "%s taskq thread", device_get_nameunit(sc->sc_dev)); + + sc->sc_tq = taskqueue_create(sc->sc_tq_name, M_NOWAIT, + taskqueue_thread_enqueue, &sc->sc_tq); + taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s", + sc->sc_tq_thr_name); + + TASK_INIT(&sc->sc_tx_task, 0, rge_tx_task, sc); + + callout_init_mtx(&sc->sc_timeout, &sc->sc_mtx, 0); + + return (0); +fail: + rge_detach(dev); + return (ENXIO); +} + +/** + * @brief flush the mbufq queue + * + * Again this should likely be per-TXQ. 
+ *
+ * This should be called with the driver lock held.
+ */
+static void
+rge_txq_flush_mbufs(struct rge_softc *sc)
+{
+	struct mbuf *m;
+	int ntx = 0;
+
+	RGE_ASSERT_LOCKED(sc);
+
+	while ((m = mbufq_dequeue(&sc->sc_txq)) != NULL) {
+		m_freem(m);
+		ntx++;
+	}
+
+	RGE_DPRINTF(sc, RGE_DEBUG_XMIT, "%s: %d frames flushed\n", __func__,
+	    ntx);
+}
+
+static int
+rge_detach(device_t dev)
+{
+	struct rge_softc *sc = device_get_softc(dev);
+	int i, rid;
+
+	/* global flag, detaching */
+	RGE_LOCK(sc);
+	sc->sc_stopped = true;
+	sc->sc_detaching = true;
+	RGE_UNLOCK(sc);
+
+	/* stop/drain network interface */
+	callout_drain(&sc->sc_timeout);
+
+	/* Make sure TX task isn't running */
+	if (sc->sc_tq != NULL) {
+		while (taskqueue_cancel(sc->sc_tq, &sc->sc_tx_task, NULL) != 0)
+			taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
+	}
+
+	RGE_LOCK(sc);
+	callout_stop(&sc->sc_timeout);
+
+	/* stop NIC / DMA */
+	rge_stop_locked(sc);
+
+	/* TODO: wait for completion */
+
+	/* Free pending TX mbufs */
+	rge_txq_flush_mbufs(sc);
+
+	RGE_UNLOCK(sc);
+
+	/* Free taskqueue */
+	if (sc->sc_tq != NULL) {
+		taskqueue_free(sc->sc_tq);
+		sc->sc_tq = NULL;
+	}
+
+	/* Free descriptor memory */
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: freemem\n", __func__);
+	rge_freemem(sc);
+	rge_free_stats_mem(sc);
+
+	if (sc->sc_ifp) {
+		RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: ifdetach/if_free\n",
+		    __func__);
+		if (sc->sc_ether_attached)
+			ether_ifdetach(sc->sc_ifp);
+		if_free(sc->sc_ifp);
+	}
+
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_desc\n", __func__);
+	if (sc->sc_dmat_tx_desc)
+		bus_dma_tag_destroy(sc->sc_dmat_tx_desc);
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_buf\n", __func__);
+	if (sc->sc_dmat_tx_buf)
+		bus_dma_tag_destroy(sc->sc_dmat_tx_buf);
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_desc\n", __func__);
+	if (sc->sc_dmat_rx_desc)
+		bus_dma_tag_destroy(sc->sc_dmat_rx_desc);
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_buf\n", __func__);
+	if (sc->sc_dmat_rx_buf)
+		bus_dma_tag_destroy(sc->sc_dmat_rx_buf);
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_stats_buf\n", __func__);
+	if (sc->sc_dmat_stats_buf)
+		bus_dma_tag_destroy(sc->sc_dmat_stats_buf);
+	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat\n", __func__);
+	if (sc->sc_dmat)
+		bus_dma_tag_destroy(sc->sc_dmat);
+
+	/* Teardown interrupts */
+	for (i = 0; i < RGE_MSI_MESSAGES; i++) {
+		if (sc->sc_ih[i] != NULL) {
+			bus_teardown_intr(sc->sc_dev, sc->sc_irq[i],
+			    sc->sc_ih[i]);
+			sc->sc_ih[i] = NULL;
+		}
+	}
+
+	/* Free interrupt resources */
+	for (i = 0, rid = 1; i < RGE_MSI_MESSAGES; i++, rid++) {
+		if (sc->sc_irq[i] != NULL) {
+			bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
+			    rid, sc->sc_irq[i]);
+			sc->sc_irq[i] = NULL;
+		}
+	}
+
+	/* Free MSI allocation */
+	if (sc->rge_flags & RGE_FLAG_MSI)
+		pci_release_msi(dev);
+
+	if (sc->sc_bres) {
+		RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: release mmio\n",
+		    __func__);
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rman_get_rid(sc->sc_bres), sc->sc_bres);
+		sc->sc_bres = NULL;
+	}
+
+	if (sc->sc_queues) {
+		free(sc->sc_queues, M_DEVBUF);
+		sc->sc_queues = NULL;
+	}
+
+	mtx_destroy(&sc->sc_mtx);
+
+	return (0);
+}
+
+#if 0
+
+int
+rge_activate(struct device *self, int act)
+{
+#ifndef SMALL_KERNEL
+	struct rge_softc *sc = (struct rge_softc *)self;
+#endif
+
+	switch (act) {
+	case DVACT_POWERDOWN:
+#ifndef SMALL_KERNEL
+		rge_wol_power(sc);
+#endif
+		break;
+	}
+	return (0);
+}
+#endif
+
+static void
+rge_intr_msi(void *arg)
+{
+	struct mbufq rx_mq;
+	struct epoch_tracker et;
+	struct mbuf *m;
+	struct rge_softc *sc = arg;
+	struct rge_queues *q = sc->sc_queues;
+	uint32_t status;
+	int claimed = 0, rv;
+
+	sc->sc_drv_stats.intr_cnt++;
+
+	mbufq_init(&rx_mq, RGE_RX_LIST_CNT);
+
+	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0)
+		return;
+
+	RGE_LOCK(sc);
+
+	if (sc->sc_suspended || sc->sc_stopped || sc->sc_detaching) {
+		RGE_UNLOCK(sc);
+		return;
+	}
+
+	/* Disable interrupts. */
+	RGE_WRITE_4(sc, RGE_IMR, 0);
+
+	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
+		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
+			goto done;
+	}
+
+	status = RGE_READ_4(sc, RGE_ISR);
+	if (status)
+		RGE_WRITE_4(sc, RGE_ISR, status);
+
+	if (status & RGE_ISR_PCS_TIMEOUT)
+		claimed = 1;
+
+	rv = 0;
+	if (status & sc->rge_intrs) {
+		(void) q;
+		rv |= rge_rxeof(q, &rx_mq);
+		rv |= rge_txeof(q);
+
+		if (status & RGE_ISR_SYSTEM_ERR) {
+			sc->sc_drv_stats.intr_system_err_cnt++;
+			rge_init_locked(sc);
+		}
+		claimed = 1;
+	}
+
+	if (sc->rge_timerintr) {
+		if (!rv) {
+			/*
+			 * Nothing needs to be processed, fallback
+			 * to use TX/RX interrupts.
+			 */
+			rge_setup_intr(sc, RGE_IMTYPE_NONE);
+
+			/*
+			 * Recollect, mainly to avoid the possible
+			 * race introduced by changing interrupt
+			 * masks.
+			 */
+			rge_rxeof(q, &rx_mq);
+			rge_txeof(q);
+		} else
+			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
+	} else if (rv) {
+		/*
+		 * Assume that using simulated interrupt moderation
+		 * (hardware timer based) could reduce the interrupt
+		 * rate.
+		 */
+		rge_setup_intr(sc, RGE_IMTYPE_SIM);
+	}
+
+	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
+
+done:
+	RGE_UNLOCK(sc);
+
+	NET_EPOCH_ENTER(et);
+	/* Handle any RX frames, outside of the driver lock */
+	while ((m = mbufq_dequeue(&rx_mq)) != NULL) {

*** 18573 LINES SKIPPED ***