Date: Tue, 15 Jan 2013 18:01:23 +0000 (UTC)
From: Adrian Chadd <adrian@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r245465 - head/sys/dev/ath
Message-ID: <201301151801.r0FI1NYQ023469@svn.freebsd.org>

Author: adrian
Date: Tue Jan 15 18:01:23 2013
New Revision: 245465
URL: http://svnweb.freebsd.org/changeset/base/245465

Log:
  Implement frame (data) transmission using if_transmit(), rather than
  if_start().

  This removes the overlapping data path TX from occurring, which solves
  quite a number of the potential TX queue races in ath(4).  It doesn't
  fix the net80211 layer TX queue races and it doesn't fix the raw TX
  path yet, but it's an important step towards this.

  This hasn't dropped the TX performance in my testing; primarily because
  now the TX path can quickly queue frames and continue along processing.

  This involves a few rather deep changes:

  * Use the ath_buf as a queue placeholder for now, as we need to be able
    to support queuing a list of mbufs (ie, when transmitting fragments)
    and m_nextpkt can't be used here (because it's what is joining the
    fragments together)
  * if_transmit() now simply allocates the ath_buf and queues it to a
    driver TX staging queue.
  * TX is now moved into a taskqueue function.
  * The TX taskqueue function now dequeues and transmits frames.
  * Fragments are handled correctly here - as the current API passes the
    fragment list as one mbuf list (joined with m_nextpkt) through to the
    driver if_transmit().
  * For the couple of places where ath_start() may be called (mostly from
    net80211 when starting the VAP up again), just reimplement it using
    the new enqueue and taskqueue methods.

  What I don't like (about this work and the TX code in general):

  * I'm using the same lock for the staging TX queue management and the
    actual TX.  This isn't required; I'm just being slack.
  * I haven't yet moved TX to a separate taskqueue (but the taskqueue is
    created); it's easy enough to do this later if necessary.  I just need
    to make sure it's a higher priority queue, so TX has the same
    behaviour as it used to (where it would preempt existing RX..)
  * I need to re-review the TX path a little more and make sure that
    ieee80211_node_*() functions aren't called within the TX lock.  When
    queueing, I should just push failed frames into a queue and when I'm
    wrapping up the TX code, unlock the TX lock and call
    ieee80211_node_free() on each.
  * It would be nice if I could hold the TX lock for the entire TX and TX
    completion, rather than this release/re-acquire behaviour.  But that
    requires that I shuffle around the TX completion code to handle actual
    ath_buf free and net80211 callback/free outside of the TX lock.
    That's one of my next projects.
  * the ic_raw_xmit() path doesn't use this yet - so it still has
    sequencing problems with parallel, overlapping calls to the data path.
    I'll fix this later.
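
For readers following the design rather than the diff: the core of the change
is the classic "stage the frame, then kick a worker" transmit pattern.  Below
is a minimal, self-contained user-space sketch of that pattern; it is an
illustration only, not the ath(4) code.  The names (struct pkt, xmit(),
worker()) are made up, a pthread condition variable stands in for the driver's
taskqueue, and it assumes a BSD-style <sys/queue.h> that provides
TAILQ_CONCAT (as on FreeBSD).

/*
 * Sketch of the enqueue-and-kick pattern: the transmit entry point only
 * appends to a staging list and unconditionally wakes the worker, which
 * drains the whole list outside the caller's context.
 */
#include <sys/queue.h>

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
        int id;                         /* stands in for an mbuf/ath_buf */
        TAILQ_ENTRY(pkt) link;
};

TAILQ_HEAD(pkt_list, pkt);

static struct pkt_list staging = TAILQ_HEAD_INITIALIZER(staging);
static pthread_mutex_t staging_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t staging_cv = PTHREAD_COND_INITIALIZER;
static int done;

/* "if_transmit" analogue: append to the staging list, then always kick. */
static int
xmit(int id)
{
        struct pkt *p = malloc(sizeof(*p));

        if (p == NULL)
                return (-1);    /* caller still owns the frame on failure */
        p->id = id;

        pthread_mutex_lock(&staging_mtx);
        TAILQ_INSERT_TAIL(&staging, p, link);
        pthread_mutex_unlock(&staging_mtx);

        /* Unconditional kick; avoids any "is the worker running?" race. */
        pthread_cond_signal(&staging_cv);
        return (0);
}

/* "TX taskqueue" analogue: drain the staging list, transmit each entry. */
static void *
worker(void *arg)
{
        struct pkt_list local = TAILQ_HEAD_INITIALIZER(local);
        struct pkt *p;

        (void) arg;
        for (;;) {
                pthread_mutex_lock(&staging_mtx);
                while (TAILQ_EMPTY(&staging) && !done)
                        pthread_cond_wait(&staging_cv, &staging_mtx);
                /* Grab the whole pending list in one go. */
                TAILQ_CONCAT(&local, &staging, link);
                if (done && TAILQ_EMPTY(&local)) {
                        pthread_mutex_unlock(&staging_mtx);
                        return (NULL);
                }
                pthread_mutex_unlock(&staging_mtx);

                /* "ath_tx_start" each frame outside the producer's path. */
                while ((p = TAILQ_FIRST(&local)) != NULL) {
                        TAILQ_REMOVE(&local, p, link);
                        printf("tx pkt %d\n", p->id);
                        free(p);
                }
        }
}

int
main(void)
{
        pthread_t tid;
        int i;

        pthread_create(&tid, NULL, worker, NULL);
        for (i = 0; i < 8; i++)
                xmit(i);

        pthread_mutex_lock(&staging_mtx);
        done = 1;
        pthread_mutex_unlock(&staging_mtx);
        pthread_cond_signal(&staging_cv);
        pthread_join(tid, NULL);
        return (0);
}

The unconditional wakeup in xmit() is the same trade-off the commit makes in
ath_transmit(): a little per-frame signalling overhead in exchange for not
having to reason about a racy "worker already running" flag.
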
  Tested:

  * Hostap - AR9280, AR9220
  * STA - AR5212, AR9280, AR5416

Modified:
  head/sys/dev/ath/if_ath.c
  head/sys/dev/ath/if_ath_misc.h
  head/sys/dev/ath/if_ath_sysctl.c
  head/sys/dev/ath/if_ath_tx.c
  head/sys/dev/ath/if_athvar.h

Modified: head/sys/dev/ath/if_ath.c
==============================================================================
--- head/sys/dev/ath/if_ath.c    Tue Jan 15 17:50:07 2013    (r245464)
+++ head/sys/dev/ath/if_ath.c    Tue Jan 15 18:01:23 2013    (r245465)
@@ -152,7 +152,6 @@ static void    ath_init(void *);
 static void    ath_stop_locked(struct ifnet *);
 static void    ath_stop(struct ifnet *);
 static int     ath_reset_vap(struct ieee80211vap *, u_long);
-static void    ath_start_queue(struct ifnet *ifp);
 static int     ath_media_change(struct ifnet *);
 static void    ath_watchdog(void *);
 static int     ath_ioctl(struct ifnet *, u_long, caddr_t);
@@ -213,6 +212,14 @@ static void    ath_dfs_tasklet(void *, int)
 static void    ath_node_powersave(struct ieee80211_node *, int);
 static int     ath_node_set_tim(struct ieee80211_node *, int);
 
+static int     ath_transmit(struct ifnet *ifp, struct mbuf *m);
+static void    ath_qflush(struct ifnet *ifp);
+
+static void    ath_txq_qinit(struct ifnet *ifp);
+static void    ath_txq_qflush(struct ifnet *ifp);
+static int     ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0);
+static void    ath_txq_qrun(struct ifnet *ifp);
+
 #ifdef IEEE80211_SUPPORT_TDMA
 #include <dev/ath/if_ath_tdma.h>
 #endif
@@ -429,12 +436,20 @@ ath_attach(u_int16_t devid, struct ath_s
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
 	    "%s taskq", ifp->if_xname);
 
+	sc->sc_tx_tq = taskqueue_create("ath_tx_taskq", M_NOWAIT,
+	    taskqueue_thread_enqueue, &sc->sc_tx_tq);
+	taskqueue_start_threads(&sc->sc_tx_tq, 1, PI_NET,
+	    "%s TX taskq", ifp->if_xname);
+
 	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
 	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
 	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
 	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
-	TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc);
-	TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc);
+	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
+	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
+
+	/* XXX make this a higher priority taskqueue? */
+	TASK_INIT(&sc->sc_txpkttask, 0, ath_start_task, sc);
 
 	/*
 	 * Allocate hardware transmit queues: one queue for
@@ -554,13 +569,18 @@ ath_attach(u_int16_t devid, struct ath_s
 	ifp->if_softc = sc;
 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
-	ifp->if_start = ath_start_queue;
+	/* XXX net80211 uses if_start to re-start ifnet processing */
+	ifp->if_start = ath_start;
+	ifp->if_transmit = ath_transmit;
+	ifp->if_qflush = ath_qflush;
 	ifp->if_ioctl = ath_ioctl;
 	ifp->if_init = ath_init;
 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
 	IFQ_SET_READY(&ifp->if_snd);
 
+	ath_txq_qinit(ifp);
+
 	ic->ic_ifp = ifp;
 	/* XXX not right but it's not used anywhere important */
 	ic->ic_phytype = IEEE80211_T_OFDM;
@@ -966,16 +986,16 @@ ath_detach(struct ath_softc *sc)
 	ath_stop(ifp);
 	ieee80211_ifdetach(ifp->if_l2com);
 	taskqueue_free(sc->sc_tq);
+	taskqueue_free(sc->sc_tx_tq);
 #ifdef ATH_TX99_DIAG
 	if (sc->sc_tx99 != NULL)
 		sc->sc_tx99->detach(sc->sc_tx99);
 #endif
 	ath_rate_detach(sc->sc_rc);
-
 #ifdef ATH_DEBUG_ALQ
 	if_ath_alq_tidyup(&sc->sc_alq);
 #endif
-
+	ath_txq_qflush(ifp);
 	ath_spectral_detach(sc);
 	ath_dfs_detach(sc);
 	ath_desc_free(sc);
@@ -2454,6 +2474,14 @@ ath_buf_clone(struct ath_softc *sc, cons
 	tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
 	tbf->bf_status = bf->bf_status;
 	tbf->bf_m = bf->bf_m;
+	/*
+	 * XXX Copy the node reference, the caller is responsible
+	 * for deleting the node reference before it frees its
+	 * buffer.
+	 *
+	 * XXX It's done like this so we don't call the net80211
+	 * code whilst having active TX queue locks held.
+	 */
 	tbf->bf_node = bf->bf_node;
 	/* will be setup by the chain/setup function */
 	tbf->bf_lastds = NULL;
@@ -2498,13 +2526,70 @@ ath_getbuf(struct ath_softc *sc, ath_buf
 }
 
 static void
-ath_start_queue(struct ifnet *ifp)
+ath_qflush(struct ifnet *ifp)
 {
-	struct ath_softc *sc = ifp->if_softc;
-	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: start");
+	/* XXX complete/suspend TX */
+	ath_txq_qflush(ifp);
+
+	/* Unsuspend TX? */
+}
+
+/*
+ * Transmit a frame from net80211.
+ */
+static int
+ath_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+	struct ieee80211_node *ni;
+	struct ath_softc *sc = (struct ath_softc *) ifp->if_softc;
+
+	ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+
+	if (ath_txq_qadd(ifp, m) < 0) {
+		/*
+		 * If queuing fails, the if_transmit() API makes the
+		 * callee responsible for freeing the mbuf (rather than
+		 * the caller, who just assumes the mbuf has been dealt
+		 * with somehow).
+		 *
+		 * BUT, net80211 will free node references if if_transmit()
+		 * fails _on encapsulated buffers_.  Since drivers
+		 * only get fully encapsulated frames from net80211 (via
+		 * raw or otherwise APIs), we must be absolutely careful
+		 * to not free the node ref or things will get loopy
+		 * down the track.
+		 *
+		 * For tx fragments, the TX code must free whatever
+		 * new references it created, but NOT the original
+		 * TX node ref that was passed in.
+		 */
+		ath_freetx(m);
+		return (ENOBUFS);
+	}
+
+	/*
+	 * Unconditionally kick the taskqueue.
+	 *
+	 * Now, there's a subtle race condition possible here if we
+	 * went down the path of only kicking the taskqueue if it
+	 * wasn't running.  If we're not absolutely, positively
+	 * careful, we could have a small race window between
+	 * finishing the taskqueue and clearing the TX flag, which
+	 * would be interpreted in _this_ context as "we don't need
+	 * to kick the TX taskqueue, as said taskqueue is already
+	 * running."
+	 *
+	 * It's a problem in some of the 1GE/10GE NIC drivers.
+	 * So until a _correct_ method for implementing this is
+	 * drafted up and written, which avoids (potentially)
+	 * large amounts of locking contention per-frame, let's
+	 * just do the inefficient "kick taskqueue each time"
+	 * method.
+	 */
 	ath_tx_kick(sc);
-	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: finished");
+
+	return (0);
 }
 
 void
@@ -2531,9 +2616,7 @@ ath_start_task(void *arg, int npending)
 	sc->sc_txstart_cnt++;
 	ATH_PCU_UNLOCK(sc);
 
-	ATH_TX_LOCK(sc);
-	ath_start(sc->sc_ifp);
-	ATH_TX_UNLOCK(sc);
+	ath_txq_qrun(ifp);
 
 	ATH_PCU_LOCK(sc);
 	sc->sc_txstart_cnt--;
@@ -2541,91 +2624,298 @@ ath_start_task(void *arg, int npending)
 	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: finished");
 }
 
-void
-ath_start(struct ifnet *ifp)
+/*
+ * Pending TX buffer chain management routines.
+ */
+
+
+/*
+ * Initialise the TX queue!
+ */
+static void
+ath_txq_qinit(struct ifnet *ifp)
+{
+	struct ath_softc *sc = ifp->if_softc;
+
+	TAILQ_INIT(&sc->sc_txbuf_list);
+}
+
+/*
+ * Add this mbuf to the TX buffer chain.
+ *
+ * This allocates an ath_buf, links the mbuf into it, and
+ * appends it to the end of the TX buffer chain.
+ * It doesn't fill out the ath_buf in any way besides
+ * that.
+ *
+ * Since the mbuf may be a list of mbufs representing
+ * 802.11 fragments, handle allocating ath_bufs for each
+ * of the mbuf fragments.
+ *
+ * If we queued it, 0 is returned. Else, < 0 is returned.
+ *
+ * If <0 is returned, the sender is responsible for
+ * freeing the mbuf if appropriate.
+ */
+static int
+ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0)
 {
 	struct ath_softc *sc = ifp->if_softc;
-	struct ieee80211_node *ni;
 	struct ath_buf *bf;
-	struct mbuf *m, *next;
 	ath_bufhead frags;
-	int npkts = 0;
+	struct ieee80211_node *ni;
+	struct mbuf *m;
 
-	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
-		return;
+	/* XXX recursive TX completion -> TX? */
+	ATH_TX_UNLOCK_ASSERT(sc);
 
-	ATH_TX_LOCK_ASSERT(sc);
+	/*
+	 * We grab the node pointer, but we don't deref
+	 * the node.  The caller must be responsible for
+	 * freeing the node reference if it decides to
+	 * free the mbuf.
+	 */
+	ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif;
 
-	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
+	ATH_TXBUF_LOCK(sc);
+	if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
+		/* XXX increment counter? */
+		ATH_TXBUF_UNLOCK(sc);
+		IF_LOCK(&ifp->if_snd);
+		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+		IF_UNLOCK(&ifp->if_snd);
+		return (-1);
+	}
+	ATH_TXBUF_UNLOCK(sc);
 
-	for (;;) {
+	/*
+	 * Grab a TX buffer and associated resources.
+	 */
+	bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
+	if (bf == NULL) {
+		device_printf(sc->sc_dev,
+		    "%s: couldn't allocate a buffer\n",
+		    __func__);
+		return (-1);
+	}
+
+	/* Setup the initial buffer node contents */
+	bf->bf_m = m0;
+	bf->bf_node = ni;
+
+	/*
+	 * Check for fragmentation.  If this frame
+	 * has been broken up verify we have enough
+	 * buffers to send all the fragments so all
+	 * go out or none...
+	 */
+	TAILQ_INIT(&frags);
+	if (m0->m_flags & M_FRAG)
+		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: txfrag\n", __func__);
+	if ((m0->m_flags & M_FRAG) &&
+	    !ath_txfrag_setup(sc, &frags, m0, ni)) {
+		DPRINTF(sc, ATH_DEBUG_XMIT,
+		    "%s: out of txfrag buffers\n", __func__);
+		sc->sc_stats.ast_tx_nofrag++;
+		ifp->if_oerrors++;
+		goto bad;
+	}
+
+	/*
+	 * Don't stuff the non-fragment frame onto the fragment
+	 * queue. ath_txfrag_cleanup() should only be called on fragments -
+	 * ie, the _extra_ ieee80211_node references - and not the single
+	 * node reference already done as part of the net08211 TX call
+	 * into the driver.
+	 */
+
+	ATH_TX_LOCK(sc);
+
+	/*
+	 * Throw the single frame onto the queue.
+	 */
+	TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list);
+
+	/*
+	 * Update next packet duration length if it's a fragment.
+	 * It's needed for accurate NAV calculations (which for
+	 * fragments include the length of the NEXT fragment.)
+	 */
+	if (m0->m_nextpkt != NULL)
+		bf->bf_state.bfs_nextpktlen =
+		    m0->m_nextpkt->m_pkthdr.len;
+
+	/*
+	 * Append the fragments.  We have to populate bf and node
+	 * references here as although the txfrag setup code does
+	 * create buffers and increment the node ref, it doesn't
+	 * populate the fields for us.
+	 */
+	m = m0->m_nextpkt;
+	while ( (bf = TAILQ_FIRST(&frags)) != NULL) {
+		bf->bf_m = m;
+		bf->bf_node = ni;
+		device_printf(sc->sc_dev, "%s: adding bf=%p, m=%p, ni=%p\n",
+		    __func__,
+		    bf,
+		    bf->bf_m,
+		    bf->bf_node);
+		TAILQ_REMOVE(&frags, bf, bf_list);
+		TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list);
+
+		/*
+		 * For duration (NAV) calculations, we need
+		 * to know the next fragment size.
+		 *
+		 * XXX This isn't entirely accurate as it doesn't
+		 * take pad bytes and such into account, but it'll do
+		 * for fragment length / NAV calculations.
+		 */
+		if (m->m_nextpkt != NULL)
+			bf->bf_state.bfs_nextpktlen =
+			    m->m_nextpkt->m_pkthdr.len;
+
+		m = m->m_nextpkt;
+	}
+	ATH_TX_UNLOCK(sc);
+
+	return (0);
+bad:
+	device_printf(sc->sc_dev, "%s: bad?!\n", __func__);
+	bf->bf_m = NULL;
+	bf->bf_node = NULL;
+	ATH_TXBUF_LOCK(sc);
+	ath_returnbuf_head(sc, bf);
+	ath_txfrag_cleanup(sc, &frags, ni);
+	ATH_TXBUF_UNLOCK(sc);
+	return (-1);
+}
+
+/*
+ * Flush the pending TX buffer chain.
+ */
+static void
+ath_txq_qflush(struct ifnet *ifp)
+{
+	struct ath_softc *sc = ifp->if_softc;
+	ath_bufhead txlist;
+	struct ath_buf *bf;
+
+	device_printf(sc->sc_dev, "%s: called\n", __func__);
+	TAILQ_INIT(&txlist);
+
+	/* Grab lock */
+	ATH_TX_LOCK(sc);
+
+	/* Copy everything out of sc_txbuf_list into txlist */
+	TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
+
+	/* Unlock */
+	ATH_TX_UNLOCK(sc);
+
+	/* Now, walk the list, freeing things */
+	while ((bf = TAILQ_FIRST(&txlist)) != NULL) {
+		TAILQ_REMOVE(&txlist, bf, bf_list);
+
+		if (bf->bf_node)
+			ieee80211_free_node(bf->bf_node);
+
+		m_free(bf->bf_m);
+
+		/* XXX paranoia! */
+		bf->bf_m = NULL;
+		bf->bf_node = NULL;
+
+		/*
+		 * XXX Perhaps do a second pass with the TXBUF lock
+		 * held and free them all at once?
+		 */
 		ATH_TXBUF_LOCK(sc);
-		if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
-			/* XXX increment counter? */
-			ATH_TXBUF_UNLOCK(sc);
-			IF_LOCK(&ifp->if_snd);
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-			IF_UNLOCK(&ifp->if_snd);
-			break;
-		}
+		ath_returnbuf_head(sc, bf);
 		ATH_TXBUF_UNLOCK(sc);
-
+	}
+}
+
+/*
+ * Walk the TX buffer queue and call ath_tx_start() on each
+ * of them.
+ */
+static void
+ath_txq_qrun(struct ifnet *ifp)
+{
+	struct ath_softc *sc = ifp->if_softc;
+	ath_bufhead txlist;
+	struct ath_buf *bf, *bf_next;
+	struct ieee80211_node *ni;
+	struct mbuf *m;
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
+		return;
+
+	TAILQ_INIT(&txlist);
+
+	/*
+	 * Grab the frames to transmit from the tx queue
+	 */
+
+	/* Copy everything out of sc_txbuf_list into txlist */
+	ATH_TX_LOCK(sc);
+	TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
+	ATH_TX_UNLOCK(sc);
+
+	/*
+	 * For now, the ath_tx_start() code sits behind the same lock;
+	 * worry about serialising this in a taskqueue later.
+	 */
+
+	ATH_TX_LOCK(sc);
+
+	/*
+	 * Attempt to transmit each frame.
+	 *
+	 * In the old code path - if a TX fragment fails, subsequent
+	 * fragments in that group would be aborted.
+	 *
+	 * It would be nice to chain together TX fragments in this
+	 * way so they can be aborted together.
+	 */
+	TAILQ_FOREACH_SAFE(bf, &txlist, bf_list, bf_next) {
 		/*
-		 * Grab a TX buffer and associated resources.
+		 * Clear, because we're going to reuse this
+		 * as a real ath_buf now
 		 */
-		bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
-		if (bf == NULL)
-			break;
+		ni = bf->bf_node;
+		m = bf->bf_m;
+
+		bf->bf_node = NULL;
+		bf->bf_m = NULL;
 
-		IFQ_DEQUEUE(&ifp->if_snd, m);
-		if (m == NULL) {
-			ATH_TXBUF_LOCK(sc);
-			ath_returnbuf_head(sc, bf);
-			ATH_TXBUF_UNLOCK(sc);
-			break;
-		}
-		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
-		npkts ++;
 		/*
-		 * Check for fragmentation.  If this frame
-		 * has been broken up verify we have enough
-		 * buffers to send all the fragments so all
-		 * go out or none...
-		 */
-		TAILQ_INIT(&frags);
-		if ((m->m_flags & M_FRAG) &&
-		    !ath_txfrag_setup(sc, &frags, m, ni)) {
-			DPRINTF(sc, ATH_DEBUG_XMIT,
-			    "%s: out of txfrag buffers\n", __func__);
-			sc->sc_stats.ast_tx_nofrag++;
-			ifp->if_oerrors++;
-			ath_freetx(m);
-			goto bad;
-		}
-		ifp->if_opackets++;
-	nextfrag:
+		 * Remove it from the list.
+		 */
+		TAILQ_REMOVE(&txlist, bf, bf_list);
+
 		/*
-		 * Pass the frame to the h/w for transmission.
-		 * Fragmented frames have each frag chained together
-		 * with m_nextpkt.  We know there are sufficient ath_buf's
-		 * to send all the frags because of work done by
-		 * ath_txfrag_setup.  We leave m_nextpkt set while
-		 * calling ath_tx_start so it can use it to extend the
-		 * the tx duration to cover the subsequent frag and
-		 * so it can reclaim all the mbufs in case of an error;
-		 * ath_tx_start clears m_nextpkt once it commits to
-		 * handing the frame to the hardware.
+		 * If we fail, free this buffer and go to the next one;
+		 * ath_tx_start() frees the mbuf but not the node
+		 * reference.
 		 */
-		next = m->m_nextpkt;
 		if (ath_tx_start(sc, ni, bf, m)) {
-	bad:
+			/*
+			 * XXX m is freed by ath_tx_start(); node reference
+			 * is not!
+			 */
+			DPRINTF(sc, ATH_DEBUG_XMIT,
+			    "%s: failed; bf=%p, ni=%p, m=%p\n",
+			    __func__,
+			    bf,
+			    ni,
+			    m);
 			ifp->if_oerrors++;
-	reclaim:
 			bf->bf_m = NULL;
 			bf->bf_node = NULL;
 			ATH_TXBUF_LOCK(sc);
 			ath_returnbuf_head(sc, bf);
-			ath_txfrag_cleanup(sc, &frags, ni);
 			ATH_TXBUF_UNLOCK(sc);
 			/*
 			 * XXX todo, free the node outside of
@@ -2633,37 +2923,84 @@ ath_start(struct ifnet *ifp)
 			 */
 			if (ni != NULL)
 				ieee80211_free_node(ni);
-			continue;
+		} else {
+			/*
+			 * Check here if the node is in power save state.
+			 * XXX we should hold a node ref here, and release
+			 * it after the TX has completed.
+			 */
+			ath_tx_update_tim(sc, ni, 1);
+			ifp->if_opackets++;
 		}
 
 		/*
-		 * Check here if the node is in power save state.
+		 * XXX should check for state change and flip out
+		 * if needed.
 		 */
-		ath_tx_update_tim(sc, ni, 1);
+	}
+	ATH_TX_UNLOCK(sc);
 
-		if (next != NULL) {
-			/*
-			 * Beware of state changing between frags.
-			 * XXX check sta power-save state?
-			 */
-			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
-				DPRINTF(sc, ATH_DEBUG_XMIT,
-				    "%s: flush fragmented packet, state %s\n",
-				    __func__,
-				    ieee80211_state_name[ni->ni_vap->iv_state]);
-				ath_freetx(next);
-				goto reclaim;
-			}
-			m = next;
-			bf = TAILQ_FIRST(&frags);
-			KASSERT(bf != NULL, ("no buf for txfrag"));
-			TAILQ_REMOVE(&frags, bf, bf_list);
-			goto nextfrag;
+	/*
+	 * If we break out early (eg a state change) we should prepend these
+	 * frames onto the TX queue.
+	 */
+}
+
+/*
+ * This is now primarily used by the net80211 layer to kick-start
+ * queue processing.
+ */
+void
+ath_start(struct ifnet *ifp)
+{
+	struct mbuf *m;
+	struct ath_softc *sc = ifp->if_softc;
+	struct ieee80211_node *ni;
+	int npkts = 0;
+
+	ATH_TX_UNLOCK_ASSERT(sc);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
+		return;
+
+	/*
+	 * If we're below the free buffer limit, don't dequeue anything.
+	 * The original code would not dequeue anything from the queue
+	 * if allocating an ath_buf failed.
+	 *
+	 * For if_transmit, we have to either queue or drop the frame.
+	 * So we have to try and queue it _somewhere_.
+	 */
+	for (;;) {
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL) {
+			break;
 		}
-		sc->sc_wd_timer = 5;
+
+		/*
+		 * If we do fail here, just break out for now
+		 * and wait until we've transmitted something
+		 * before we attempt again?
+		 */
+		if (ath_txq_qadd(ifp, m) < 0) {
+			DPRINTF(sc, ATH_DEBUG_XMIT,
+			    "%s: ath_txq_qadd failed\n",
+			    __func__);
+			ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+			if (ni != NULL)
+				ieee80211_free_node(ni);
+			ath_freetx(m);
+			break;
		}
+		npkts++;
 	}
 
-	ATH_KTR(sc, ATH_KTR_TX, 1, "ath_start: finished; npkts=%d", npkts);
+	/*
+	 * Kick the taskqueue into activity, but only if we
+	 * queued something.
+	 */
+	if (npkts > 0)
+		ath_tx_kick(sc);
 }
 
 static int

Modified: head/sys/dev/ath/if_ath_misc.h
==============================================================================
--- head/sys/dev/ath/if_ath_misc.h    Tue Jan 15 17:50:07 2013    (r245464)
+++ head/sys/dev/ath/if_ath_misc.h    Tue Jan 15 18:01:23 2013    (r245465)
@@ -124,9 +124,8 @@ static inline void
 ath_tx_kick(struct ath_softc *sc)
 {
 
-	ATH_TX_LOCK(sc);
-	ath_start(sc->sc_ifp);
-	ATH_TX_UNLOCK(sc);
+	/* XXX eventually try sc_tx_tq? */
+	taskqueue_enqueue(sc->sc_tq, &sc->sc_txpkttask);
 }
 
 #endif

Modified: head/sys/dev/ath/if_ath_sysctl.c
==============================================================================
--- head/sys/dev/ath/if_ath_sysctl.c    Tue Jan 15 17:50:07 2013    (r245464)
+++ head/sys/dev/ath/if_ath_sysctl.c    Tue Jan 15 18:01:23 2013    (r245465)
@@ -505,6 +505,33 @@ ath_sysctl_forcebstuck(SYSCTL_HANDLER_AR
 	return 0;
 }
 
+static int
+ath_sysctl_hangcheck(SYSCTL_HANDLER_ARGS)
+{
+	struct ath_softc *sc = arg1;
+	int val = 0;
+	int error;
+	uint32_t mask = 0xffffffff;
+	uint32_t *sp;
+	uint32_t rsize;
+	struct ath_hal *ah = sc->sc_ah;
+
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return error;
+	if (val == 0)
+		return 0;
+
+	/* Do a hang check */
+	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS,
+	    &mask, sizeof(mask),
+	    (void *) &sp, &rsize))
+		return (0);
+	device_printf(sc->sc_dev, "%s: sp=0x%08x\n", __func__, *sp);
+
+	val = 0;
+	return 0;
+}
 
 #ifdef ATH_DEBUG_ALQ
 static int
@@ -661,6 +688,10 @@ ath_sysctlattach(struct ath_softc *sc)
 	    "forcebstuck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
 	    ath_sysctl_forcebstuck, "I", "");
 
+	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+	    "hangcheck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+	    ath_sysctl_hangcheck, "I", "");
+
 	if (ath_hal_hasintmit(ah)) {
 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 		    "intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,

Modified: head/sys/dev/ath/if_ath_tx.c
==============================================================================
--- head/sys/dev/ath/if_ath_tx.c    Tue Jan 15 17:50:07 2013    (r245464)
+++ head/sys/dev/ath/if_ath_tx.c    Tue Jan 15 18:01:23 2013    (r245465)
@@ -1124,7 +1124,11 @@ ath_tx_calc_duration(struct ath_softc *s
 			dur = rt->info[rix].lpAckDuration;
 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
 			dur += dur;		/* additional SIFS+ACK */
-			KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment"));
+			if (bf->bf_state.bfs_nextpktlen == 0) {
+				device_printf(sc->sc_dev,
+				    "%s: next txfrag len=0?\n",
+				    __func__);
+			}
 			/*
 			 * Include the size of next fragment so NAV is
 			 * updated properly.  The last fragment uses only
@@ -1135,7 +1139,7 @@ ath_tx_calc_duration(struct ath_softc *s
 			 * first fragment!
 			 */
 			dur += ath_hal_computetxtime(ah, rt,
-			    bf->bf_m->m_nextpkt->m_pkthdr.len,
+			    bf->bf_state.bfs_nextpktlen,
 			    rix, shortPreamble);
 		}
 		if (isfrag) {

Modified: head/sys/dev/ath/if_athvar.h
==============================================================================
--- head/sys/dev/ath/if_athvar.h    Tue Jan 15 17:50:07 2013    (r245464)
+++ head/sys/dev/ath/if_athvar.h    Tue Jan 15 18:01:23 2013    (r245465)
@@ -279,6 +279,8 @@ struct ath_buf {
 		int32_t bfs_keyix;	/* crypto key index */
 		int32_t bfs_txantenna;	/* TX antenna config */
 
+		uint16_t bfs_nextpktlen;	/* length of next frag pkt */
+
 		/* Make this an 8 bit value? */
 		enum ieee80211_protmode bfs_protmode;
 
@@ -494,6 +496,13 @@ struct ath_softc {
 	struct ath_tx_methods	sc_tx;
 	struct ath_tx_edma_fifo	sc_txedma[HAL_NUM_TX_QUEUES];
 
+	/*
+	 * This is (currently) protected by the TX queue lock;
+	 * it should migrate to a separate lock later
+	 * so as to minimise contention.
+	 */
+	ath_bufhead		sc_txbuf_list;
+
 	int			sc_rx_statuslen;
 	int			sc_tx_desclen;
 	int			sc_tx_statuslen;
@@ -514,6 +523,7 @@ struct ath_softc {
 	struct mtx		sc_tx_mtx;	/* TX access mutex */
 	char			sc_tx_mtx_name[32];
 	struct taskqueue	*sc_tq;		/* private task queue */
+	struct taskqueue	*sc_tx_tq;	/* private TX task queue */
 	struct ath_hal		*sc_ah;		/* Atheros HAL */
 	struct ath_ratectrl	*sc_rc;		/* tx rate control support */
 	struct ath_tx99		*sc_tx99;	/* tx99 adjunct state */
@@ -660,6 +670,7 @@ struct ath_softc {
 	struct ath_txq		*sc_ac2q[5];	/* WME AC -> h/w q map */
 	struct task		sc_txtask;	/* tx int processing */
 	struct task		sc_txqtask;	/* tx proc processing */
+	struct task		sc_txpkttask;	/* tx frame processing */
 	struct ath_descdma	sc_txcompdma;	/* TX EDMA completion */
 	struct mtx		sc_txcomplock;	/* TX EDMA completion lock */