Date: Mon, 14 Nov 2016 06:03:30 +0000 (UTC)
From: Sepherosa Ziehau <sephe@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r308631 - stable/11/sys/dev/hyperv/netvsc
Message-ID: <201611140603.uAE63UOo017521@repo.freebsd.org>
Author: sephe
Date: Mon Nov 14 06:03:29 2016
New Revision: 308631
URL: https://svnweb.freebsd.org/changeset/base/308631

Log:
  MFC 308164

    hyperv/hn: Regroup the if_start related functions.

    And put them under HN_IFSTART_SUPPORT, which is on by default until
    we remove the if_start related bits from the base system.

    Sponsored by:	Microsoft
    Differential Revision:	https://reviews.freebsd.org/D8392

Modified:
  stable/11/sys/dev/hyperv/netvsc/if_hn.c

Directory Properties:
  stable/11/   (props changed)
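One idiom recurs throughout the diff below: only the `if'/`else' keywords are
placed under HN_IFSTART_SUPPORT, while the brace block that follows stays
outside the guard, so a single copy of the if_transmit setup code compiles
either way. A minimal standalone sketch of the idiom (names are illustrative,
not driver code):

    #include <stdio.h>

    #define HN_IFSTART_SUPPORT	/* remove to drop the if_start path */

    static int use_if_start = 0;	/* stands in for hn_use_if_start */

    static void
    setup_tx_method(void)
    {
    #ifdef HN_IFSTART_SUPPORT
    	if (use_if_start)
    		printf("TX method: if_start\n");
    	else
    #endif
    	{
    		/*
    		 * When HN_IFSTART_SUPPORT is undefined, the if/else
    		 * above vanishes and this bare block always runs,
    		 * i.e. the if_transmit path is selected unconditionally.
    		 */
    		printf("TX method: if_transmit\n");
    	}
    }

    int
    main(void)
    {
    	setup_tx_method();
    	return (0);
    }

With the macro defined (the current default), the block runs only when
use_if_start is clear; once the macro is whacked, the bare block remains and
runs unconditionally, with no duplicated code to clean up.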
Modified: stable/11/sys/dev/hyperv/netvsc/if_hn.c
==============================================================================
--- stable/11/sys/dev/hyperv/netvsc/if_hn.c	Mon Nov 14 06:00:30 2016	(r308630)
+++ stable/11/sys/dev/hyperv/netvsc/if_hn.c	Mon Nov 14 06:03:29 2016	(r308631)
@@ -108,6 +108,8 @@ __FBSDID("$FreeBSD$");
 
 #include "vmbus_if.h"
 
+#define HN_IFSTART_SUPPORT
+
 #define HN_RING_CNT_DEF_MAX		8
 
 /* YYY should get it from the underlying channel */
@@ -209,7 +211,9 @@ static void hn_chan_callback(struct vm
 
 static void			hn_init(void *);
 static int			hn_ioctl(struct ifnet *, u_long, caddr_t);
+#ifdef HN_IFSTART_SUPPORT
 static void			hn_start(struct ifnet *);
+#endif
 static int			hn_transmit(struct ifnet *, struct mbuf *);
 static void			hn_xmit_qflush(struct ifnet *);
 static int			hn_ifmedia_upd(struct ifnet *);
@@ -323,10 +327,12 @@ static int hn_xmit(struct hn_tx_ring *
 static void			hn_xmit_taskfunc(void *, int);
 static void			hn_xmit_txeof(struct hn_tx_ring *);
 static void			hn_xmit_txeof_taskfunc(void *, int);
+#ifdef HN_IFSTART_SUPPORT
 static int			hn_start_locked(struct hn_tx_ring *, int);
 static void			hn_start_taskfunc(void *, int);
 static void			hn_start_txeof(struct hn_tx_ring *);
 static void			hn_start_txeof_taskfunc(void *, int);
+#endif
 
 SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
     "Hyper-V network interface");
@@ -394,10 +400,12 @@ static int hn_bind_tx_taskq = -1;
 SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
     &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");
 
+#ifdef HN_IFSTART_SUPPORT
 /* Use ifnet.if_start instead of ifnet.if_transmit */
 static int hn_use_if_start = 0;
 SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
     &hn_use_if_start, 0, "Use if_start TX method");
+#endif
 
 /* # of channels to use */
 static int hn_chan_cnt = 0;
@@ -773,10 +781,12 @@ hn_attach(device_t dev)
 	tx_ring_cnt = hn_tx_ring_cnt;
 	if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
 		tx_ring_cnt = ring_cnt;
+#ifdef HN_IFSTART_SUPPORT
 	if (hn_use_if_start) {
 		/* ifnet.if_start only needs one TX ring. */
 		tx_ring_cnt = 1;
 	}
+#endif
 
 	/*
 	 * Set the leader CPU for channels.
@@ -872,6 +882,7 @@ hn_attach(device_t dev)
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = hn_ioctl;
 	ifp->if_init = hn_init;
+#ifdef HN_IFSTART_SUPPORT
 	if (hn_use_if_start) {
 		int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]);
 
@@ -879,7 +890,9 @@ hn_attach(device_t dev)
 		IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
 		ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
 		IFQ_SET_READY(&ifp->if_snd);
-	} else {
+	} else
+#endif
+	{
 		ifp->if_transmit = hn_transmit;
 		ifp->if_qflush = hn_xmit_qflush;
 	}
@@ -1508,7 +1521,10 @@ again:
 	if (!error) {
 		ETHER_BPF_MTAP(ifp, txd->m);
 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-		if (!hn_use_if_start) {
+#ifdef HN_IFSTART_SUPPORT
+		if (!hn_use_if_start)
+#endif
+		{
 			if_inc_counter(ifp, IFCOUNTER_OBYTES,
 			    txd->m->m_pkthdr.len);
 			if (txd->m->m_flags & M_MCAST)
@@ -1558,71 +1574,6 @@ again:
 }
 
 /*
- * Start a transmit of one or more packets
- */
-static int
-hn_start_locked(struct hn_tx_ring *txr, int len)
-{
-	struct hn_softc *sc = txr->hn_sc;
-	struct ifnet *ifp = sc->hn_ifp;
-
-	KASSERT(hn_use_if_start,
-	    ("hn_start_locked is called, when if_start is disabled"));
-	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
-	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
-
-	if (__predict_false(txr->hn_suspended))
-		return 0;
-
-	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
-	    IFF_DRV_RUNNING)
-		return 0;
-
-	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-		struct hn_txdesc *txd;
-		struct mbuf *m_head;
-		int error;
-
-		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
-		if (m_head == NULL)
-			break;
-
-		if (len > 0 && m_head->m_pkthdr.len > len) {
-			/*
-			 * This sending could be time consuming; let callers
-			 * dispatch this packet sending (and sending of any
-			 * following up packets) to tx taskqueue.
-			 */
-			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			return 1;
-		}
-
-		txd = hn_txdesc_get(txr);
-		if (txd == NULL) {
-			txr->hn_no_txdescs++;
-			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-			break;
-		}
-
-		error = hn_encap(txr, txd, &m_head);
-		if (error) {
-			/* Both txd and m_head are freed */
-			continue;
-		}
-
-		error = hn_txpkt(ifp, txr, txd);
-		if (__predict_false(error)) {
-			/* txd is freed, but m_head is not */
-			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-			break;
-		}
-	}
-	return 0;
-}
-
-/*
  * Append the specified data to the indicated mbuf chain,
  * Extend the mbuf chain if the new data does not fit in
  * existing space.
@@ -2127,61 +2078,6 @@ hn_stop(struct hn_softc *sc)
 }
 
 static void
-hn_start(struct ifnet *ifp)
-{
-	struct hn_softc *sc = ifp->if_softc;
-	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];
-
-	if (txr->hn_sched_tx)
-		goto do_sched;
-
-	if (mtx_trylock(&txr->hn_tx_lock)) {
-		int sched;
-
-		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
-		mtx_unlock(&txr->hn_tx_lock);
-		if (!sched)
-			return;
-	}
-do_sched:
-	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
-}
-
-static void
-hn_start_txeof(struct hn_tx_ring *txr)
-{
-	struct hn_softc *sc = txr->hn_sc;
-	struct ifnet *ifp = sc->hn_ifp;
-
-	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
-
-	if (txr->hn_sched_tx)
-		goto do_sched;
-
-	if (mtx_trylock(&txr->hn_tx_lock)) {
-		int sched;
-
-		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
-		mtx_unlock(&txr->hn_tx_lock);
-		if (sched) {
-			taskqueue_enqueue(txr->hn_tx_taskq,
-			    &txr->hn_tx_task);
-		}
-	} else {
-do_sched:
-		/*
-		 * Release the OACTIVE earlier, with the hope, that
-		 * others could catch up. The task will clear the
-		 * flag again with the hn_tx_lock to avoid possible
-		 * races.
-		 */
-		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
-	}
-}
-
-static void
 hn_init_locked(struct hn_softc *sc)
 {
 	struct ifnet *ifp = sc->hn_ifp;
@@ -2928,11 +2824,14 @@ hn_tx_ring_create(struct hn_softc *sc, i
 
 	txr->hn_tx_taskq = sc->hn_tx_taskq;
 
+#ifdef HN_IFSTART_SUPPORT
 	if (hn_use_if_start) {
 		txr->hn_txeof = hn_start_txeof;
 		TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
 		TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
-	} else {
+	} else
+#endif
+	{
 		int br_depth;
 
 		txr->hn_txeof = hn_xmit_txeof;
@@ -3069,7 +2968,10 @@ hn_tx_ring_create(struct hn_softc *sc, i
 		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
 		    CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
 		    "# of available TX descs");
-		if (!hn_use_if_start) {
+#ifdef HN_IFSTART_SUPPORT
+		if (!hn_use_if_start)
+#endif
+		{
 			SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
 			    CTLFLAG_RD, &txr->hn_oactive, 0,
 			    "over active");
@@ -3329,6 +3231,8 @@ hn_destroy_tx_data(struct hn_softc *sc)
 	sc->hn_tx_ring_inuse = 0;
 }
 
+#ifdef HN_IFSTART_SUPPORT
+
 static void
 hn_start_taskfunc(void *xtxr, int pending __unused)
 {
@@ -3339,6 +3243,89 @@ hn_start_taskfunc(void *xtxr, int pendin
 	mtx_unlock(&txr->hn_tx_lock);
 }
 
+static int
+hn_start_locked(struct hn_tx_ring *txr, int len)
+{
+	struct hn_softc *sc = txr->hn_sc;
+	struct ifnet *ifp = sc->hn_ifp;
+
+	KASSERT(hn_use_if_start,
+	    ("hn_start_locked is called, when if_start is disabled"));
+	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
+	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
+
+	if (__predict_false(txr->hn_suspended))
+		return 0;
+
+	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING)
+		return 0;
+
+	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+		struct hn_txdesc *txd;
+		struct mbuf *m_head;
+		int error;
+
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+		if (m_head == NULL)
+			break;
+
+		if (len > 0 && m_head->m_pkthdr.len > len) {
+			/*
+			 * This sending could be time consuming; let callers
+			 * dispatch this packet sending (and sending of any
+			 * following up packets) to tx taskqueue.
+			 */
+			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+			return 1;
+		}
+
+		txd = hn_txdesc_get(txr);
+		if (txd == NULL) {
+			txr->hn_no_txdescs++;
+			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
+			break;
+		}
+
+		error = hn_encap(txr, txd, &m_head);
+		if (error) {
+			/* Both txd and m_head are freed */
+			continue;
+		}
+
+		error = hn_txpkt(ifp, txr, txd);
+		if (__predict_false(error)) {
+			/* txd is freed, but m_head is not */
+			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
+			break;
+		}
+	}
+	return 0;
+}
+
+static void
+hn_start(struct ifnet *ifp)
+{
+	struct hn_softc *sc = ifp->if_softc;
+	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];
+
+	if (txr->hn_sched_tx)
+		goto do_sched;
+
+	if (mtx_trylock(&txr->hn_tx_lock)) {
+		int sched;
+
+		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
+		mtx_unlock(&txr->hn_tx_lock);
+		if (!sched)
+			return;
+	}
+do_sched:
+	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
+}
+
 static void
 hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
 {
@@ -3350,6 +3337,42 @@ hn_start_txeof_taskfunc(void *xtxr, int
 	mtx_unlock(&txr->hn_tx_lock);
 }
 
+static void
+hn_start_txeof(struct hn_tx_ring *txr)
+{
+	struct hn_softc *sc = txr->hn_sc;
+	struct ifnet *ifp = sc->hn_ifp;
+
+	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
+
+	if (txr->hn_sched_tx)
+		goto do_sched;
+
+	if (mtx_trylock(&txr->hn_tx_lock)) {
+		int sched;
+
+		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
+		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
+		mtx_unlock(&txr->hn_tx_lock);
+		if (sched) {
+			taskqueue_enqueue(txr->hn_tx_taskq,
+			    &txr->hn_tx_task);
+		}
+	} else {
+do_sched:
+		/*
+		 * Release the OACTIVE earlier, with the hope, that
+		 * others could catch up. The task will clear the
+		 * flag again with the hn_tx_lock to avoid possible
+		 * races.
+		 */
+		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
+		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
+	}
+}
+
+#endif	/* HN_IFSTART_SUPPORT */
+
 static int
 hn_xmit(struct hn_tx_ring *txr, int len)
 {
@@ -3358,8 +3381,10 @@ hn_xmit(struct hn_tx_ring *txr, int len)
 	struct mbuf *m_head;
 
 	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
+#ifdef HN_IFSTART_SUPPORT
 	KASSERT(hn_use_if_start == 0,
 	    ("hn_xmit is called, when if_start is enabled"));
+#endif
 
 	if (__predict_false(txr->hn_suspended))
 		return 0;
@@ -4056,7 +4081,10 @@ hn_resume_data(struct hn_softc *sc)
 	 */
 	hn_resume_tx(sc, sc->hn_tx_ring_cnt);
 
-	if (!hn_use_if_start) {
+#ifdef HN_IFSTART_SUPPORT
+	if (!hn_use_if_start)
+#endif
+	{
 		/*
 		 * Flush unused drbrs, since hn_tx_ring_inuse may be
 		 * reduced.
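The regrouped hn_start/hn_start_txeof pair follows a trylock-or-defer shape:
transmit directly while the TX lock can be taken and packets stay under
hn_direct_tx_size, and otherwise hand the queue to the TX taskqueue. A rough
userland analogue of that shape, assuming pthreads (send_direct and
defer_to_taskq are illustrative stand-ins, not driver functions):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task). */
    static void
    defer_to_taskq(void)
    {
    	printf("deferred to the TX task\n");
    }

    /*
     * Stand-in for hn_start_locked(); returns nonzero when the rest of
     * the queue should be dispatched to the taskqueue instead.
     */
    static int
    send_direct(int direct_size_limit)
    {
    	printf("sent directly (cutoff %d bytes)\n", direct_size_limit);
    	return (0);
    }

    static void
    start(void)
    {
    	if (pthread_mutex_trylock(&tx_lock) == 0) {
    		int defer;

    		defer = send_direct(128);
    		pthread_mutex_unlock(&tx_lock);
    		if (!defer)
    			return;
    	}
    	/*
    	 * The lock was contended, or a packet exceeded the direct-send
    	 * cutoff; queue the work instead of blocking the caller.
    	 */
    	defer_to_taskq();
    }

    int
    main(void)
    {
    	start();
    	return (0);
    }

Note that hn_use_if_start is declared CTLFLAG_RDTUN, so the if_start path can
only be selected at boot, e.g. by setting hw.hn.use_if_start=1 in loader.conf.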