Date:      Wed, 3 Aug 2011 07:00:55 +0000 (UTC)
From:      Adrian Chadd <adrian@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r224625 - user/adrian/if_ath_tx/sys/dev/ath
Message-ID:  <201108030700.p7370tOH090668@svn.freebsd.org>

Author: adrian
Date: Wed Aug  3 07:00:55 2011
New Revision: 224625
URL: http://svn.freebsd.org/changeset/base/224625

Log:
  Do a majorish rework of the software TX queue handling.
  
  * Eliminate the per-node lock; it was annoying and is mostly unneeded
    for now
  * Eliminate the per-softc list of active TX nodes
  * Add a per-hardware-TXQ list of active TIDs, linked off of the hardware
    ath_txq (a simplified sketch follows this list)
  * Sprinkle lots of lock assertions around to make sure I've got things locked
    in the right places.
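
  As a rough, userland-only sketch of the new scheduling list (the real
  definitions and locking are in the if_athvar.h / if_ath_tx.c hunks below;
  field names mirror the diff, but tid_sched()/tid_unsched() are just
  illustrative stand-ins for ath_tx_node_sched()/_unsched()):

	#include <sys/queue.h>
	#include <stdio.h>

	/* Cut-down stand-ins for the driver structures. */
	struct ath_tid {
		int			tid;		/* TID number */
		int			sched;		/* on the active list? */
		STAILQ_ENTRY(ath_tid)	axq_qelem;	/* entry on txq->axq_tidq */
	};

	struct ath_txq {
		STAILQ_HEAD(, ath_tid)	axq_tidq;	/* TIDs with traffic to send */
	};

	/* Mirrors ath_tx_node_sched(): queue a TID if it isn't already queued. */
	static void
	tid_sched(struct ath_txq *txq, struct ath_tid *atid)
	{
		if (atid->sched)
			return;		/* already scheduled */
		atid->sched = 1;
		STAILQ_INSERT_TAIL(&txq->axq_tidq, atid, axq_qelem);
	}

	/* Mirrors ath_tx_node_unsched(): drop a TID with nothing left to send. */
	static void
	tid_unsched(struct ath_txq *txq, struct ath_tid *atid)
	{
		if (atid->sched == 0)
			return;
		atid->sched = 0;
		STAILQ_REMOVE(&txq->axq_tidq, atid, ath_tid, axq_qelem);
	}

	int
	main(void)
	{
		struct ath_txq txq;
		struct ath_tid tid = { .tid = 5, .sched = 0 };
		struct ath_tid *iter;

		STAILQ_INIT(&txq.axq_tidq);
		tid_sched(&txq, &tid);
		STAILQ_FOREACH(iter, &txq.axq_tidq, axq_qelem)
			printf("TID %d has traffic to send\n", iter->tid);
		tid_unsched(&txq, &tid);
		return (0);
	}

  Keeping the sched flag in the TID itself keeps sched/unsched O(1) and
  idempotent, so the scheduler can be kicked from multiple paths without
  extra bookkeeping.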
  
  The major locking change here is inspired by what the older reference code does.
  Here, the locking for the per-TID state is hidden behind the hardware TXQ
  lock, rather than having separate per-node and per-TID locks. It's more
  coarse-grained, but it'll be enough to get the code bootstrapped.
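
  Concretely, the caller side now looks like the ath_tx_start() change in
  this commit: a single TXQ lock acquisition covers both the software-queue
  insert and the scheduler kick:

	ATH_TXQ_LOCK(txq);
	/* add to the per-TID software queue */
	ath_tx_swq(sc, ni, txq, bf, m0);
	/* kick the scheduler for this hardware TXQ */
	ath_txq_sched(sc, txq);
	ATH_TXQ_UNLOCK(txq);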
  
  I'll worry about more fine-grained locking later on once the rest of this first
  pass has been completed.
  
  Note - the FreeBSD TX processq function (ath_tx_processq()) only locks the
    hardware TXQ for as long as it needs to add/remove buffers from it. The
    reference code grabs the lock and holds it for as long as the processing
    takes, which means the completion functions also sit behind the same lock
    as the setup/queue functions. I'll likely do that in a subsequent commit.
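
  A rough sketch of that reference-style pattern, assuming hypothetical
  fetch_completed_buf() and complete_buf() helpers (neither exists in this
  tree); this is roughly what a later commit might move towards:

	ATH_TXQ_LOCK(txq);
	/*
	 * Hold the TXQ lock across the whole completion pass, so the
	 * completion path can poke at per-TID state without racing
	 * the queue/schedule path.
	 */
	while ((bf = fetch_completed_buf(sc, txq)) != NULL)
		complete_buf(sc, bf);
	ath_txq_sched(sc, txq);
	ATH_TXQ_UNLOCK(txq);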
  
  Note - I'm not properly pausing or unpausing the TID when ADDBA is in
    progress; I'm just not TX'ing on it. I'll worry about the pause/unpause
    later on. This just means that extra CPU is burnt during the ADDBA
    exchange.
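
  For now that skip happens in the scheduler itself (see the ath_txq_sched()
  hunk below); the TID stays on axq_tidq and is re-polled on every pass until
  ADDBA completes:

	if (ath_tx_ampdu_pending(sc, atid->an, atid->tid)) {
		/* XXX TODO should remove it from the list */
		continue;
	}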
  
  Obtained from:	Atheros

Modified:
  user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
  user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
  user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.h
  user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath.c	Wed Aug  3 06:51:14 2011	(r224624)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath.c	Wed Aug  3 07:00:55 2011	(r224625)
@@ -751,12 +751,6 @@ ath_attach(u_int16_t devid, struct ath_s
 	ath_sysctl_stats_attach(sc);
 	ath_sysctl_hal_attach(sc);
 
-	/* Setup software TX queue related bits */
-	STAILQ_INIT(&sc->sc_txnodeq);
-	snprintf(sc->sc_txnodeq_name, sizeof(sc->sc_txnodeq_name),
-	    "%s: txnodeq\n", device_get_nameunit(sc->sc_dev));
-	ATH_TXNODE_LOCK_INIT(sc);
-
 	if (bootverbose)
 		ieee80211_announce(ic);
 	ath_announce(sc);
@@ -808,7 +802,6 @@ ath_detach(struct ath_softc *sc)
 	ath_desc_free(sc);
 	ath_tx_cleanup(sc);
 	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
-	ATH_TXNODE_LOCK_DESTROY(sc);
 	if_free(ifp);
 
 	return 0;
@@ -1951,12 +1944,6 @@ ath_start(struct ifnet *ifp)
 
 		sc->sc_wd_timer = 5;
 	}
-
-	/*
-	 * Since there's no nicer place to put this for now,
-	 * stick the software TXQ punt call here.
-	 */
-	ath_txq_sched(sc);
 }
 
 static int
@@ -3863,6 +3850,7 @@ ath_txq_init(struct ath_softc *sc, struc
 	txq->axq_intrcnt = 0;
 	txq->axq_link = NULL;
 	STAILQ_INIT(&txq->axq_q);
+	STAILQ_INIT(&txq->axq_tidq);
 	ATH_TXQ_LOCK_INIT(sc, txq);
 }
 
@@ -4233,6 +4221,21 @@ ath_tx_processq(struct ath_softc *sc, st
 	if (txq->axq_depth <= 1)
 		ieee80211_ff_flush(ic, txq->axq_ac);
 #endif
+
+	/* Kick the TXQ scheduler */
+	/*
+	 * XXX for now, (whilst the completion functions aren't doing anything),
+	 * XXX re-lock the hardware txq here.
+	 *
+	 * Later on though, those completion functions may grovel around
+	 * in the per-tid state. That's going to require locking.
+	 * The reference code kept TXQ locked for the whole of this duration
+	 * so the completion functions didn't race.
+	 */
+	ATH_TXQ_LOCK(txq);
+	ath_txq_sched(sc, txq);
+	ATH_TXQ_UNLOCK(txq);
+
 	return nacked;
 }
 

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Wed Aug  3 06:51:14 2011	(r224624)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Wed Aug  3 07:00:55 2011	(r224625)
@@ -236,8 +236,7 @@ ath_tx_dmasetup(struct ath_softc *sc, st
 }
 
 static void
-ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq,
-   struct ath_buf *bf)
+ath_tx_chaindesclist(struct ath_softc *sc, struct ath_buf *bf)
 {
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath_desc *ds, *ds0;
@@ -270,7 +269,7 @@ ath_tx_chaindesclist(struct ath_softc *s
 static void
 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
 {
-	ATH_TXQ_LOCK(txq);
+	ATH_TXQ_LOCK_ASSERT(txq);
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	     ("busy status 0x%x", bf->bf_flags));
 	if (txq->axq_link != NULL) {
@@ -288,7 +287,6 @@ ath_tx_handoff_mcast(struct ath_softc *s
 	}
 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
 	txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
-	ATH_TXQ_UNLOCK(txq);
 }
 
 
@@ -309,7 +307,7 @@ ath_tx_handoff_hw(struct ath_softc *sc, 
 	 * the SWBA handler since frames only go out on DTIM and
 	 * to avoid possible races.
 	 */
-	ATH_TXQ_LOCK(txq);
+	ATH_TXQ_LOCK_ASSERT(txq);
 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
 	     ("busy status 0x%x", bf->bf_flags));
 	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
@@ -391,12 +389,17 @@ ath_tx_handoff_hw(struct ath_softc *sc, 
 		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
 		ath_hal_txstart(ah, txq->axq_qnum);
 	}
-	ATH_TXQ_UNLOCK(txq);
 }
 
+/*
+ * Hand off a packet to the hardware (or mcast queue.)
+ * The relevant hardware txq should be locked.
+ */
 static void
 ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
 {
+	ATH_TXQ_LOCK_ASSERT(txq);
+
 	if (txq->axq_qnum == ATH_TXQ_SWQ)
 		ath_tx_handoff_mcast(sc, txq, bf);
 	else
@@ -939,12 +942,6 @@ ath_tx_start(struct ath_softc *sc, struc
 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
 
 	/* A-MPDU TX */
-	/*
-	 * XXX This doesn't hold the node lock!
-	 * XXX So it's possible that another TX process can change
-	 * XXX the AMPDU status from underneath us. This needs to be
-	 * XXX further investigated!
-	 */
 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
@@ -989,11 +986,15 @@ ath_tx_start(struct ath_softc *sc, struc
 	//m0 = bf->bf_m;
 
 	/* Fill in the details in the descriptor list */
-	ath_tx_chaindesclist(sc, txq, bf);
+	ath_tx_chaindesclist(sc, bf);
 
 #if 1
+	ATH_TXQ_LOCK(txq);
 	/* add to software queue */
 	ath_tx_swq(sc, ni, txq, bf, m0);
+	/* Kick txq */
+	ath_txq_sched(sc, txq);
+	ATH_TXQ_UNLOCK(txq);
 #else
 
 	/*
@@ -1215,7 +1216,7 @@ ath_tx_raw_start(struct ath_softc *sc, s
 	}
 
 	/* Fill in the details in the descriptor list */
-	ath_tx_chaindesclist(sc, sc->sc_ac2q[pri], bf);
+	ath_tx_chaindesclist(sc, bf);
 
 	/*
 	 * If we're overiding the ADDBA destination, dump directly
@@ -1223,6 +1224,7 @@ ath_tx_raw_start(struct ath_softc *sc, s
 	 * frames to that node are.
 	 */
 
+	ATH_TXQ_LOCK(sc->sc_ac2q[pri]);
 	if (do_override)
 		ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
 	else {
@@ -1230,8 +1232,9 @@ ath_tx_raw_start(struct ath_softc *sc, s
 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf, m0);
 
 		/* Kick txq */
-		ath_txq_sched(sc);
+		ath_txq_sched(sc, sc->sc_ac2q[pri]);
 	}
+	ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]);
 
 	return 0;
 }
@@ -1393,42 +1396,48 @@ ath_tx_action_frame_override_queue(struc
 /*
  * Mark the current node/TID as ready to TX.
  *
- * This is done to make it easy for the software scheduler to 
+ * This is done to make it easy for the software scheduler to
  * find which nodes have data to send.
  *
- * The node and txnode locks should be held.
+ * The relevant hw txq lock should be held.
  */
 static void
 ath_tx_node_sched(struct ath_softc *sc, struct ath_node *an, int tid)
 {
-	ATH_NODE_LOCK_ASSERT(an);
-	ATH_TXNODE_LOCK_ASSERT(sc);
+	struct ath_tid *atid = &an->an_tid[tid];
+	int ac = TID_TO_WME_AC(tid);
+	struct ath_txq *txq = sc->sc_ac2q[ac];
+
+	ATH_TXQ_LOCK_ASSERT(txq);
 
-	if (an->sched)
+	if (atid->sched)
 		return;		/* already scheduled */
 
-	an->sched = 1;
+	atid->sched = 1;
 
-	STAILQ_INSERT_TAIL(&sc->sc_txnodeq, an, an_list);
+	STAILQ_INSERT_TAIL(&txq->axq_tidq, atid, axq_qelem);
 }
 
 /*
  * Mark the current node as no longer needing to be polled for
  * TX packets.
  *
- * The node and txnode locks should be held.
+ * The relevant hw txq lock should be held.
  */
 static void
 ath_tx_node_unsched(struct ath_softc *sc, struct ath_node *an, int tid)
 {
-	ATH_NODE_LOCK_ASSERT(an);
-	ATH_TXNODE_LOCK_ASSERT(sc);
+	struct ath_tid *atid = &an->an_tid[tid];
+	int ac = TID_TO_WME_AC(tid);
+	struct ath_txq *txq = sc->sc_ac2q[ac];
 
-	if (an->sched == 0)
+	ATH_TXQ_LOCK_ASSERT(txq);
+
+	if (atid->sched == 0)
 		return;
 
-	an->sched = 0;
-	STAILQ_REMOVE(&sc->sc_txnodeq, an, ath_node, an_list);
+	atid->sched = 0;
+	STAILQ_REMOVE(&txq->axq_tidq, atid, ath_tid, axq_qelem);
 }
 
 /*
@@ -1477,6 +1486,8 @@ ath_tx_swq(struct ath_softc *sc, struct 
 	struct ath_tid *atid;
 	int pri, tid;
 
+	ATH_TXQ_LOCK_ASSERT(txq);
+
 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
 	wh = mtod(m0, struct ieee80211_frame *);
 	pri = M_WME_GETAC(m0);			/* honor classification */
@@ -1488,22 +1499,10 @@ ath_tx_swq(struct ath_softc *sc, struct 
 	bf->bf_state.bfs_txq = txq;
 
 	/* Queue frame to the tail of the software queue */
-	ATH_TXQ_LOCK(atid);
 	ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
-	ATH_TXQ_UNLOCK(atid);
 
-	/*
-	 * ATH_TXNODE must be acquired before ATH_NODE is acquired
-	 * if they're both needed.
-	 */
-	ATH_TXNODE_LOCK(sc);
-	ATH_NODE_LOCK(an);
-	/* Bump queued packet counter */
-	an->an_qdepth++;
-	/* Mark the given node as having packets to dequeue */
+	/* Mark the given tid as having packets to dequeue */
 	ath_tx_node_sched(sc, an, tid);
-	ATH_NODE_UNLOCK(an);
-	ATH_TXNODE_UNLOCK(sc);
 }
 
 /*
@@ -1533,16 +1532,12 @@ ath_tx_tid_init(struct ath_softc *sc, st
 {
 	int i;
 	struct ath_tid *atid;
-	struct ieee80211_node *ni = &an->an_node;
 
 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
 		atid = &an->an_tid[i];
 		STAILQ_INIT(&atid->axq_q);
-		snprintf(atid->axq_name, sizeof(atid->axq_name),
-		    "%s_a%d_t%d\n", device_get_nameunit(sc->sc_dev),
-		    ni->ni_associd,
-		    i);
-		mtx_init(&atid->axq_lock, atid->axq_name, NULL, MTX_DEF);
+		atid->tid = i;
+		atid->an = an;
 	}
 }
 
@@ -1570,8 +1565,6 @@ ath_tx_tid_txq_unmark(struct ath_softc *
  *
  * It can also be called on an active node during an interface
  * reset or state transition.
- *
- * This doesn't update an->an_qdepth!
  */
 static void
 ath_tx_tid_free_pkts(struct ath_softc *sc, struct ath_node *an,
@@ -1579,17 +1572,18 @@ ath_tx_tid_free_pkts(struct ath_softc *s
 {
 	struct ath_tid *atid = &an->an_tid[tid];
 	struct ath_buf *bf;
+	int ac = TID_TO_WME_AC(tid);
+	struct ath_txq *txq = sc->sc_ac2q[ac];
+
+	ATH_TXQ_LOCK_ASSERT(txq);
 
 	/* Walk the queue, free frames */
 	for (;;) {
-		ATH_TXQ_LOCK(atid);
 		bf = STAILQ_FIRST(&atid->axq_q);
 		if (bf == NULL) {
-			ATH_TXQ_UNLOCK(atid);
 			break;
 		}
 		ATH_TXQ_REMOVE_HEAD(atid, bf_list);
-		ATH_TXQ_UNLOCK(atid);
 		ath_tx_freebuf(sc, bf, -1);
 	}
 }
@@ -1602,8 +1596,18 @@ ath_tx_node_flush(struct ath_softc *sc, 
 {
 	int tid;
 
-	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++)
+	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
+		int ac = TID_TO_WME_AC(tid);
+		struct ath_txq *txq = sc->sc_ac2q[ac];
+
+		ATH_TXQ_LOCK(txq);
+		/* Remove this tid from the list of active tids */
+		ath_tx_node_unsched(sc, an, tid);
+
+		/* Free packets */
 		ath_tx_tid_free_pkts(sc, an, tid);
+		ATH_TXQ_UNLOCK(txq);
+	}
 
 	/*
 	 * Don't hold the node lock across free_pkts;
@@ -1611,9 +1615,6 @@ ath_tx_node_flush(struct ath_softc *sc, 
 	 * that will acquire the IEEE80211_NODE_LOCK (node table).
 	 * That then causes a lock reversal.
 	 */
-	ATH_NODE_LOCK(an);
-	an->an_qdepth = 0;
-	ATH_NODE_UNLOCK(an);
 }
 
 /*
@@ -1625,35 +1626,16 @@ ath_tx_node_flush(struct ath_softc *sc, 
 void
 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-	int i;
+	int tid;
 	struct ath_tid *atid;
-	struct ieee80211_node *ni = &an->an_node;
-
-	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
-		atid = &an->an_tid[i];
 
-		/* Free packets in sw queue */
-		/* For now, just print a loud warning if it occurs */
-		ATH_TXQ_LOCK(atid);
-		if (! STAILQ_EMPTY(&atid->axq_q))
-			device_printf(sc->sc_dev, "%s: AID %d TID %d queue not "
-			    "empty on queue destroy!\n",
-			    __func__, ni->ni_associd, i);
-		ATH_TXQ_UNLOCK(atid);
-		ath_tx_tid_free_pkts(sc, an, i);
+	/* Flush all packets currently in the sw queues for this node */
+	ath_tx_node_flush(sc, an);
 
+	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
+		atid = &an->an_tid[tid];
 		/* Mark hw-queued packets as having no parent now */
-		ath_tx_tid_txq_unmark(sc, an, i);
-
-		/* Remove any pending hardware TXQ scheduling */
-		ATH_TXNODE_LOCK(sc);
-		ATH_NODE_LOCK(an);
-		ath_tx_node_unsched(sc, an, i);
-		ATH_NODE_UNLOCK(an);
-		ATH_TXNODE_UNLOCK(sc);
-
-		/* Free mutex */
-		mtx_destroy(&atid->axq_lock);
+		ath_tx_tid_txq_unmark(sc, an, tid);
 	}
 }
 
@@ -1695,10 +1677,8 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
 	struct ath_tid *atid = &an->an_tid[tid];
 
 	for (;;) {
-		ATH_TXQ_LOCK(atid);
                 bf = STAILQ_FIRST(&atid->axq_q);
 		if (bf == NULL) {
-			ATH_TXQ_UNLOCK(atid);
 			break;
 		}
 
@@ -1712,14 +1692,10 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
 		 * XXX and progress can be made.
 		 */
 		ATH_TXQ_REMOVE_HEAD(atid, bf_list);
-		ATH_TXQ_UNLOCK(atid);
-
-		/* Remove from queue */
-		ATH_NODE_LOCK(an);
-		an->an_qdepth--;
-		ATH_NODE_UNLOCK(an);
 
 		txq = bf->bf_state.bfs_txq;
+		ATH_TXQ_LOCK_ASSERT(txq);
+
 		/* Sanity check! */
 		if (tid != bf->bf_state.bfs_tid) {
 			device_printf(sc->sc_dev, "%s: bfs_tid %d !="
@@ -1735,8 +1711,6 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
 	}
 }
 
-
-
 /*
  * Schedule some packets from the given node/TID to the hardware.
  */
@@ -1748,21 +1722,14 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
 	struct ath_tid *atid = &an->an_tid[tid];
 
 	for (;;) {
-		ATH_TXQ_LOCK(atid);
                 bf = STAILQ_FIRST(&atid->axq_q);
 		if (bf == NULL) {
-			ATH_TXQ_UNLOCK(atid);
 			break;
 		}
 		ATH_TXQ_REMOVE_HEAD(atid, bf_list);
-		ATH_TXQ_UNLOCK(atid);
-
-		/* Remove from queue */
-		ATH_NODE_LOCK(an);
-		an->an_qdepth--;
-		ATH_NODE_UNLOCK(an);
 
 		txq = bf->bf_state.bfs_txq;
+		ATH_TXQ_LOCK_ASSERT(txq);
 		/* Sanity check! */
 		if (tid != bf->bf_state.bfs_tid) {
 			device_printf(sc->sc_dev, "%s: bfs_tid %d !="
@@ -1781,6 +1748,7 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
 void
 ath_tx_hw_queue(struct ath_softc *sc, struct ath_node *an)
 {
+#if 0
 	struct ath_tid *atid;
 	int tid;
 	int isempty;
@@ -1792,12 +1760,9 @@ ath_tx_hw_queue(struct ath_softc *sc, st
 	 */
 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 		atid = &an->an_tid[tid];
-		ATH_NODE_LOCK(an);
 		if (ath_tx_ampdu_pending(sc, an, tid)) {
-			ATH_NODE_UNLOCK(an);
 			continue;
 		}
-		ATH_NODE_UNLOCK(an);
 		if (ath_tx_ampdu_running(sc, an, tid))
 			ath_tx_tid_hw_queue_aggr(sc, an, tid);
 		else
@@ -1820,36 +1785,45 @@ ath_tx_hw_queue(struct ath_softc *sc, st
 		}
 		ATH_TXQ_UNLOCK(atid);
 	}
-}
-
-#if 0
-static int
-ath_txq_node_qlen(struct ath_softc *sc, struct ath_node *an)
-{
-	ATH_NODE_LOCK_ASSERT(an);
-	return an->an_qdepth;
-}
 #endif
+}
 
 /*
- * Handle scheduling some packets from whichever nodes have
- * signaled they're (potentially) ready.
+ * Schedule some packets to the given hardware queue.
+ *
+ * This function walks the list of TIDs (ie, ath_node TIDs
+ * with queued traffic) and attempts to schedule traffic
+ * from them.
+ *
+ * This must be called with the HW TXQ lock held.
  */
 void
-ath_txq_sched(struct ath_softc *sc)
+ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
 {
-	struct ath_node *an, *next;
+	ATH_TXQ_LOCK_ASSERT(txq);
+	struct ath_tid *atid, *next;
+
+	/*
+	 * For now, let's not worry about QoS, fair-scheduling
+	 * or the like. That's a later problem. Just throw
+	 * packets at the hardware.
+	 */
+	STAILQ_FOREACH_SAFE(atid, &txq->axq_tidq, axq_qelem, next) {
+		if (ath_tx_ampdu_pending(sc, atid->an, atid->tid)) {
+			/* XXX TODO should remove it from the list */
+			continue;
+		}
+		if (ath_tx_ampdu_running(sc, atid->an, atid->tid))
+			ath_tx_tid_hw_queue_aggr(sc, atid->an, atid->tid);
+		else
+			ath_tx_tid_hw_queue_norm(sc, atid->an, atid->tid);
 
-	ATH_TXNODE_LOCK(sc);
-	/* Iterate over the list of active nodes, queuing packets */
-	STAILQ_FOREACH_SAFE(an, &sc->sc_txnodeq, an_list, next) {
-		/* Try dequeueing packets from the current node */
-		ath_tx_hw_queue(sc, an);
+		/* Empty? Remove */
+		if (atid->axq_depth == 0)
+			ath_tx_node_unsched(sc, atid->an, atid->tid);
 	}
-	ATH_TXNODE_UNLOCK(sc);
 }
 
-
 /*
  * TX addba handling
  */
@@ -1875,16 +1849,12 @@ ath_tx_get_tx_tid(struct ath_node *an, i
 
 /*
  * Is AMPDU-TX running?
- *
- * The ATH_NODE lock must be held.
  */
 static int
 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
 {
 	struct ieee80211_tx_ampdu *tap;
 
-//	ATH_NODE_LOCK_ASSERT(an);
-
 	tap = ath_tx_get_tx_tid(an, tid);
 	if (tap == NULL)
 		return 0;	/* Not valid; default to not running */
@@ -1894,16 +1864,12 @@ ath_tx_ampdu_running(struct ath_softc *s
 
 /*
  * Is AMPDU-TX negotiation pending?
- *
- * The ATH_NODE lock must be held.
  */
 static int
 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
 {
 	struct ieee80211_tx_ampdu *tap;
 
-//	ATH_NODE_LOCK_ASSERT(an);
-
 	tap = ath_tx_get_tx_tid(an, tid);
 	if (tap == NULL)
 		return 0;	/* Not valid; default to not pending */
@@ -1914,8 +1880,6 @@ ath_tx_ampdu_pending(struct ath_softc *s
 
 /*
  * Is AMPDU-TX pending for the given TID?
- *
- * The ATH_NODE lock must be held.
  */
 
 
@@ -1949,6 +1913,11 @@ ath_addba_response(struct ieee80211_node
     int dialogtoken, int code, int batimeout)
 {
 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+
+	/*
+	 * XXX todo: resume the ath_node/TID now, rather than
+	 * XXX waiting for the next packet to trigger a TX.
+	 */
 	return sc->sc_addba_response(ni, tap, dialogtoken, code, batimeout);
 }
 

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.h
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.h	Wed Aug  3 06:51:14 2011	(r224624)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.h	Wed Aug  3 07:00:55 2011	(r224625)
@@ -52,7 +52,7 @@ extern void ath_tx_tid_hw_queue_aggr(str
 extern void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
     int tid);
 extern void ath_tx_hw_queue(struct ath_softc *sc, struct ath_node *an);
-extern void ath_txq_sched(struct ath_softc *sc);
+extern void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq);
 
 /* TX addba handling */
 extern	int ath_addba_request(struct ieee80211_node *ni,

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h	Wed Aug  3 06:51:14 2011	(r224624)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h	Wed Aug  3 07:00:55 2011	(r224625)
@@ -93,8 +93,15 @@ struct ath_buf;
 struct ath_tid {
 	STAILQ_HEAD(,ath_buf) axq_q;		/* pending buffers        */
 	u_int			axq_depth;	/* SW queue depth */
-	struct mtx		axq_lock;	/* lock on queue, tx_buf */
-	char			axq_name[24];	/* e.g. "wlan0_a1_t5" */
+	struct ath_node		*an;		/* pointer to parent */
+	int			tid;		/* tid */
+
+	/*
+	 * Entry on the ath_txq; when there's traffic
+	 * to send
+	 */
+	STAILQ_ENTRY(ath_tid)	axq_qelem;
+	int			sched;
 
 	/*
 	 * The following implements a ring representing
@@ -115,13 +122,8 @@ struct ath_node {
 	struct ieee80211_node an_node;	/* base class */
 	u_int8_t	an_mgmtrix;	/* min h/w rate index */
 	u_int8_t	an_mcastrix;	/* mcast h/w rate index */
-	STAILQ_ENTRY(ath_node)	an_list;	/* Per hw-txq entry, added if there
-						 * is traffic in this queue to send */
-	int		sched;			/* Whether this node has been
-						 * added to the txq axq_nodeq */
 	struct ath_buf	*an_ff_buf[WME_NUM_AC]; /* ff staging area */
 	struct ath_tid	an_tid[IEEE80211_TID_SIZE];	/* per-TID state */
-	u_int		an_qdepth;	/* Current queue depth of all TIDs */
 	char		an_name[32];	/* eg "wlan0_a1" */
 	struct mtx	an_mtx;		/* protecting the ath_node state */
 	/* variable-length rate control state follows */
@@ -219,6 +221,9 @@ struct ath_txq {
 	STAILQ_HEAD(, ath_buf)	axq_q;		/* transmit queue */
 	struct mtx		axq_lock;	/* lock on q and link */
 	char			axq_name[12];	/* e.g. "ath0_txq4" */
+
+	/* Per-TID traffic queue for software -> hardware TX */
+	STAILQ_HEAD(,ath_tid)	axq_tidq;
 };
 
 #define	ATH_NODE_LOCK(_an)		mtx_lock(&(_an)->an_mtx)
@@ -425,11 +430,6 @@ struct ath_softc {
 	int			sc_dodfs;	/* Whether to enable DFS rx filter bits */
 	struct task		sc_dfstask;	/* DFS processing task */
 
-	/* Software TX queue related state */
-	struct mtx		sc_txnodeq_mtx;	/* mutex protecting the below */
-	STAILQ_HEAD(, ath_node)	sc_txnodeq;	/* Nodes which have traffic to send */
-	char			sc_txnodeq_name[16];	/* mutex name */
-
 	/* TX AMPDU handling */
 	int			(*sc_addba_request)(struct ieee80211_node *,
 				    struct ieee80211_tx_ampdu *, int, int, int);
@@ -447,14 +447,6 @@ struct ath_softc {
 #define	ATH_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
 #define	ATH_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
 
-#define	ATH_TXNODE_LOCK_INIT(_sc) \
-	mtx_init(&(_sc)->sc_txnodeq_mtx, (sc)->sc_txnodeq_name, \
-	NULL, MTX_DEF | MTX_RECURSE)
-#define	ATH_TXNODE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_txnodeq_mtx)
-#define	ATH_TXNODE_LOCK(_sc)		mtx_lock(&(_sc)->sc_txnodeq_mtx)
-#define	ATH_TXNODE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_txnodeq_mtx)
-#define	ATH_TXNODE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_txnodeq_mtx, MA_OWNED)
-
 #define	ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1<<i))
 
 #define	ATH_TXBUF_LOCK_INIT(_sc) do { \


