g;
+	struct rge_queues *q = sc->sc_queues;
+	uint32_t status;
+	int claimed = 0, rv;
+
+	sc->sc_drv_stats.intr_cnt++;
+
+	mbufq_init(&rx_mq, RGE_RX_LIST_CNT);
+
+	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0)
+		return;
+
+	RGE_LOCK(sc);
+
+	if (sc->sc_suspended || sc->sc_stopped || sc->sc_detaching) {
+		RGE_UNLOCK(sc);
+		return;
+	}
+
+	/* Disable interrupts. */
+	RGE_WRITE_4(sc, RGE_IMR, 0);
+
+	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
+		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
+			goto done;
+	}
+
+	status = RGE_READ_4(sc, RGE_ISR);
+	if (status)
+		RGE_WRITE_4(sc, RGE_ISR, status);
+
+	if (status & RGE_ISR_PCS_TIMEOUT)
+		claimed = 1;
+
+	rv = 0;
+	if (status & sc->rge_intrs) {
+
+		(void) q;
+		rv |= rge_rxeof(q, &rx_mq);
+		rv |= rge_txeof(q);
+
+		if (status & RGE_ISR_SYSTEM_ERR) {
+			sc->sc_drv_stats.intr_system_err_cnt++;
+			rge_init_locked(sc);
+		}
+		claimed = 1;
+	}
+
+	if (sc->rge_timerintr) {
+		if (!rv) {
+			/*
+			 * Nothing needs to be processed, fallback
+			 * to use TX/RX interrupts.
+			 */
+			rge_setup_intr(sc, RGE_IMTYPE_NONE);
+
+			/*
+			 * Recollect, mainly to avoid the possible
+			 * race introduced by changing interrupt
+			 * masks.
+			 */
+			rge_rxeof(q, &rx_mq);
+			rge_txeof(q);
+		} else
+			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
+	} else if (rv) {
+		/*
+		 * Assume that using simulated interrupt moderation
+		 * (hardware timer based) could reduce the interrupt
+		 * rate.
+		 */
+		rge_setup_intr(sc, RGE_IMTYPE_SIM);
+	}
+
+	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
+
+done:
+	RGE_UNLOCK(sc);
+
+	NET_EPOCH_ENTER(et);
+	/* Handle any RX frames, outside of the driver lock */
+	while ((m = mbufq_dequeue(&rx_mq)) != NULL) {

*** 18573 LINES SKIPPED ***
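
The hunk is cut off inside the final while loop, but the visible mbufq_init()/NET_EPOCH_ENTER() calls already show the pattern in use: RX frames are collected onto a local mbufq while the driver lock is held, and only handed to the network stack after the lock has been dropped, inside a network epoch section. A minimal sketch of how that loop typically completes, assuming the rx_mq/et/m names declared earlier in the (truncated) function and the stock if_input() ifnet accessor; the real loop body is not visible in the truncated diff:

	NET_EPOCH_ENTER(et);
	/* Hand each deferred RX frame to the stack; the driver lock is no longer held here. */
	while ((m = mbufq_dequeue(&rx_mq)) != NULL)
		if_input(sc->sc_ifp, m);
	NET_EPOCH_EXIT(et);

Deferring if_input() this way avoids calling into the stack with the driver mutex held, which would otherwise risk lock-order reversals if the stack loops a packet straight back into the transmit path.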