Date:      Thu, 17 May 2012 15:02:51 +0000 (UTC)
From:      Luigi Rizzo <luigi@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject:   svn commit: r235549 - in stable/9: sys/dev/netmap sys/net tools/tools/netmap
Message-ID:  <201205171502.q4HF2pIi003919@svn.freebsd.org>

Author: luigi
Date: Thu May 17 15:02:51 2012
New Revision: 235549
URL: http://svn.freebsd.org/changeset/base/235549

Log:
  MFC: the netmap code from HEAD, now supported in the ixgbe/ and e1000/
  drivers (re/ will come in the next commit)

Modified:
  stable/9/sys/dev/netmap/if_em_netmap.h
  stable/9/sys/dev/netmap/if_igb_netmap.h
  stable/9/sys/dev/netmap/if_lem_netmap.h
  stable/9/sys/dev/netmap/if_re_netmap.h
  stable/9/sys/dev/netmap/ixgbe_netmap.h
  stable/9/sys/dev/netmap/netmap.c
  stable/9/sys/dev/netmap/netmap_kern.h
  stable/9/sys/net/netmap.h
  stable/9/sys/net/netmap_user.h
  stable/9/tools/tools/netmap/README
  stable/9/tools/tools/netmap/bridge.c
  stable/9/tools/tools/netmap/pcap.c
  stable/9/tools/tools/netmap/pkt-gen.c

Modified: stable/9/sys/dev/netmap/if_em_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_em_netmap.h	Thu May 17 14:37:01 2012	(r235548)
+++ stable/9/sys/dev/netmap/if_em_netmap.h	Thu May 17 15:02:51 2012	(r235549)
@@ -25,49 +25,25 @@
 
 /*
  * $FreeBSD$
- * $Id: if_em_netmap.h 9802 2011-12-02 18:42:37Z luigi $
+ * $Id: if_em_netmap.h 10627 2012-02-23 19:37:15Z luigi $
  *
- * netmap changes for if_em.
+ * netmap support for em.
  *
- * For structure and details on the individual functions please see
- * ixgbe_netmap.h
+ * For more details on netmap support please see ixgbe_netmap.h
  */
 
+
 #include <net/netmap.h>
 #include <sys/selinfo.h>
 #include <vm/vm.h>
 #include <vm/pmap.h>    /* vtophys ? */
 #include <dev/netmap/netmap_kern.h>
 
+
 static void	em_netmap_block_tasks(struct adapter *);
 static void	em_netmap_unblock_tasks(struct adapter *);
-static int	em_netmap_reg(struct ifnet *, int onoff);
-static int	em_netmap_txsync(struct ifnet *, u_int, int);
-static int	em_netmap_rxsync(struct ifnet *, u_int, int);
-static void	em_netmap_lock_wrapper(struct ifnet *, int, u_int);
-
-static void
-em_netmap_attach(struct adapter *adapter)
-{
-	struct netmap_adapter na;
-
-	bzero(&na, sizeof(na));
-
-	na.ifp = adapter->ifp;
-	na.separate_locks = 1;
-	na.num_tx_desc = adapter->num_tx_desc;
-	na.num_rx_desc = adapter->num_rx_desc;
-	na.nm_txsync = em_netmap_txsync;
-	na.nm_rxsync = em_netmap_rxsync;
-	na.nm_lock = em_netmap_lock_wrapper;
-	na.nm_register = em_netmap_reg;
-	netmap_attach(&na, adapter->num_queues);
-}
 
 
-/*
- * wrapper to export locks to the generic code
- */
 static void
 em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
 {
@@ -137,8 +113,9 @@ em_netmap_unblock_tasks(struct adapter *
 	}
 }
 
+
 /*
- * register-unregister routine
+ * Register/unregister routine
  */
 static int
 em_netmap_reg(struct ifnet *ifp, int onoff)
@@ -170,7 +147,7 @@ em_netmap_reg(struct ifnet *ifp, int ono
 		}
 	} else {
 fail:
-		/* restore if_transmit */
+		/* return to non-netmap mode */
 		ifp->if_transmit = na->if_transmit;
 		ifp->if_capenable &= ~IFCAP_NETMAP;
 		em_init_locked(adapter);	/* also enable intr */
@@ -179,18 +156,19 @@ fail:
 	return (error);
 }
 
+
 /*
- * Reconcile hardware and user view of the transmit ring.
+ * Reconcile kernel and user view of the transmit ring.
  */
 static int
 em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
-	struct netmap_adapter *na = NA(adapter->ifp);
+	struct netmap_adapter *na = NA(ifp);
 	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
+	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
 
 	/* generate an interrupt approximately every half ring */
 	int report_frequency = kring->nkr_num_slots >> 1;
@@ -204,18 +182,17 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 			BUS_DMASYNC_POSTREAD);
 
-	/* check for new packets to send.
-	 * j indexes the netmap ring, l indexes the nic ring, and
-	 *	j = kring->nr_hwcur, l = E1000_TDT (not tracked),
-	 *	j == (l + kring->nkr_hwofs) % ring_size
+	/*
+	 * Process new packets to send. j is the current index in the
+	 * netmap ring, l is the corresponding index in the NIC ring.
 	 */
 	j = kring->nr_hwcur;
-	if (j != k) {	/* we have packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
-		while (j != k) {
+	if (j != k) {	/* we have new packets to send */
+		l = netmap_idx_k2n(kring, j);
+		for (n = 0; j != k; n++) {
+			/* slot is the current slot in the netmap ring */
 			struct netmap_slot *slot = &ring->slot[j];
+			/* curr is the current slot in the nic ring */
 			struct e1000_tx_desc *curr = &txr->tx_base[l];
 			struct em_buffer *txbuf = &txr->tx_buffers[l];
 			int flags = ((slot->flags & NS_REPORT) ||
@@ -223,7 +200,8 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 					E1000_TXD_CMD_RS : 0;
 			uint64_t paddr;
 			void *addr = PNMB(slot, &paddr);
-			int len = slot->len;
+			u_int len = slot->len;
+
 			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
 				if (do_lock)
 					EM_TX_UNLOCK(txr);
@@ -231,26 +209,21 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 			}
 
 			slot->flags &= ~NS_REPORT;
-			curr->upper.data = 0;
-			curr->lower.data = 
-			    htole32(adapter->txd_cmd | len |
-				(E1000_TXD_CMD_EOP | flags) );
 			if (slot->flags & NS_BUF_CHANGED) {
 				curr->buffer_addr = htole64(paddr);
 				/* buffer has changed, reload map */
 				netmap_reload_map(txr->txtag, txbuf->map, addr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}
-
+			curr->upper.data = 0;
+			curr->lower.data = htole32(adapter->txd_cmd | len |
+				(E1000_TXD_CMD_EOP | flags) );
 			bus_dmamap_sync(txr->txtag, txbuf->map,
 				BUS_DMASYNC_PREWRITE);
 			j = (j == lim) ? 0 : j + 1;
 			l = (l == lim) ? 0 : l + 1;
-			n++;
 		}
-		kring->nr_hwcur = k;
-
-		/* decrease avail by number of sent packets */
+		kring->nr_hwcur = k; /* the saved ring->cur */
 		kring->nr_hwavail -= n;
 
 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
@@ -262,9 +235,9 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 	if (n == 0 || kring->nr_hwavail < 1) {
 		int delta;
 
-		/* record completed transmissions using THD. */
+		/* record completed transmissions using TDH */
 		l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
-		if (l >= kring->nkr_num_slots) { /* XXX can happen */
+		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
 			D("TDH wrap %d", l);
 			l -= kring->nkr_num_slots;
 		}
@@ -277,7 +250,7 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 			kring->nr_hwavail += delta;
 		}
 	}
-	/* update avail to what the hardware knows */
+	/* update avail to what the kernel knows */
 	ring->avail = kring->nr_hwavail;
 
 	if (do_lock)
@@ -285,6 +258,7 @@ em_netmap_txsync(struct ifnet *ifp, u_in
 	return 0;
 }
 
+
 /*
  * Reconcile kernel and user view of the receive ring.
  */
@@ -293,10 +267,12 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
-	struct netmap_adapter *na = NA(adapter->ifp);
+	struct netmap_adapter *na = NA(ifp);
 	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n, lim = kring->nkr_num_slots - 1;
+	u_int j, l, n, lim = kring->nkr_num_slots - 1;
+	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+	u_int k = ring->cur, resvd = ring->reserved;
 
 	k = ring->cur;
 	if (k > lim)
@@ -304,53 +280,51 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
  
 	if (do_lock)
 		EM_RX_LOCK(rxr);
+
 	/* XXX check sync modes */
 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
-	/* import newly received packets into the netmap ring.
-	 * j is an index in the netmap ring, l in the NIC ring, and
-	 *	j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size
-	 *	l = rxr->next_to_check;
-	 * and
-	 *	j == (l + kring->nkr_hwofs) % ring_size
+	/*
+	 * Import newly received packets into the netmap ring.
+	 * j is an index in the netmap ring, l in the NIC ring.
 	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	/* here nkr_hwofs can be negative so must check for j < 0 */
-	if (j < 0)
-		j += lim + 1;
-	else if (j > lim)
-		j -= lim + 1;
-	for (n = 0; ; n++) {
-		struct e1000_rx_desc *curr = &rxr->rx_base[l];
-
-		if ((curr->status & E1000_RXD_STAT_DD) == 0)
-			break;
-		ring->slot[j].len = le16toh(curr->length);
-		bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[l].map,
-			BUS_DMASYNC_POSTREAD);
-		j = (j == lim) ? 0 : j + 1;
-		/* make sure next_to_refresh follows next_to_check */
-		rxr->next_to_refresh = l;	// XXX
-		l = (l == lim) ? 0 : l + 1;
-	}
-	if (n) {
-		rxr->next_to_check = l;
-		kring->nr_hwavail += n;
+	j = netmap_idx_n2k(kring, l);
+	if (netmap_no_pendintr || force_update) {
+		for (n = 0; ; n++) {
+			struct e1000_rx_desc *curr = &rxr->rx_base[l];
+			uint32_t staterr = le32toh(curr->status);
+
+			if ((staterr & E1000_RXD_STAT_DD) == 0)
+				break;
+			ring->slot[j].len = le16toh(curr->length);
+			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[l].map,
+				BUS_DMASYNC_POSTREAD);
+			j = (j == lim) ? 0 : j + 1;
+			/* make sure next_to_refresh follows next_to_check */
+			rxr->next_to_refresh = l;	// XXX
+			l = (l == lim) ? 0 : l + 1;
+		}
+		if (n) { /* update the state variables */
+			rxr->next_to_check = l;
+			kring->nr_hwavail += n;
+		}
+		kring->nr_kflags &= ~NKR_PENDINTR;
 	}
 
-	/* skip past packets that userspace has already processed */
-	j = kring->nr_hwcur;
-	if (j != k) { /* userspace has read some packets. */
-		n = 0;
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		/* here nkr_hwofs can be negative so check for l > lim */
-		if (l < 0)
-			l += lim + 1;
-		else if (l > lim)
-			l -= lim + 1;
-		while (j != k) {
+	/* skip past packets that userspace has released */
+	j = kring->nr_hwcur;	/* netmap ring index */
+	if (resvd > 0) {
+		if (resvd + ring->avail >= lim + 1) {
+			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
+			ring->reserved = resvd = 0; // XXX panic...
+		}
+		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
+	}
+        if (j != k) { /* userspace has released some packets. */
+		l = netmap_idx_k2n(kring, j); /* NIC ring index */
+		for (n = 0; j != k; n++) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_rx_desc *curr = &rxr->rx_base[l];
 			struct em_buffer *rxbuf = &rxr->rx_buffers[l];
@@ -363,20 +337,17 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
 				return netmap_ring_reinit(kring);
 			}
 
-			curr->status = 0;
 			if (slot->flags & NS_BUF_CHANGED) {
 				curr->buffer_addr = htole64(paddr);
 				/* buffer has changed, reload map */
 				netmap_reload_map(rxr->rxtag, rxbuf->map, addr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}
-
+			curr->status = 0;
 			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
 			    BUS_DMASYNC_PREREAD);
-
 			j = (j == lim) ? 0 : j + 1;
 			l = (l == lim) ? 0 : l + 1;
-			n++;
 		}
 		kring->nr_hwavail -= n;
 		kring->nr_hwcur = k;
@@ -390,8 +361,29 @@ em_netmap_rxsync(struct ifnet *ifp, u_in
 		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
 	}
 	/* tell userspace that there are new packets */
-	ring->avail = kring->nr_hwavail ;
+	ring->avail = kring->nr_hwavail - resvd;
 	if (do_lock)
 		EM_RX_UNLOCK(rxr);
 	return 0;
 }
+
+
+static void
+em_netmap_attach(struct adapter *adapter)
+{
+	struct netmap_adapter na;
+
+	bzero(&na, sizeof(na));
+
+	na.ifp = adapter->ifp;
+	na.separate_locks = 1;
+	na.num_tx_desc = adapter->num_tx_desc;
+	na.num_rx_desc = adapter->num_rx_desc;
+	na.nm_txsync = em_netmap_txsync;
+	na.nm_rxsync = em_netmap_rxsync;
+	na.nm_lock = em_netmap_lock_wrapper;
+	na.nm_register = em_netmap_reg;
+	netmap_attach(&na, adapter->num_queues);
+}
+
+/* end of file */
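
A note on the helpers introduced above: the em_netmap_txsync()/em_netmap_rxsync() hunks replace open-coded nkr_hwofs arithmetic with netmap_idx_k2n()/netmap_idx_n2k() from netmap_kern.h. The sketch below is an illustration only (the committed helpers may differ in detail); it simply restates the arithmetic being removed, where a netmap-ring ("k") index and a NIC-ring ("n") index differ by nkr_hwofs and wrap onto a ring of num_slots slots.

#include <sys/types.h>

/* illustration only: netmap ring index -> NIC ring index,
 * the translation netmap_idx_k2n() performs in the hunks above */
static inline u_int
idx_k2n_sketch(int k, int hwofs, u_int num_slots)
{
	int n = k - hwofs;

	if (n < 0)
		n += num_slots;
	else if (n >= (int)num_slots)
		n -= num_slots;
	return ((u_int)n);
}

/* illustration only: NIC ring index -> netmap ring index,
 * the translation netmap_idx_n2k() performs in the hunks above */
static inline u_int
idx_n2k_sketch(int n, int hwofs, u_int num_slots)
{
	int k = n + hwofs;

	if (k < 0)
		k += num_slots;
	else if (k >= (int)num_slots)
		k -= num_slots;
	return ((u_int)k);
}

Both the old and the new code assume that |nkr_hwofs| is smaller than the ring size, so a single wrap correction suffices.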

Modified: stable/9/sys/dev/netmap/if_igb_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_igb_netmap.h	Thu May 17 14:37:01 2012	(r235548)
+++ stable/9/sys/dev/netmap/if_igb_netmap.h	Thu May 17 15:02:51 2012	(r235549)
@@ -25,42 +25,19 @@
 
 /*
  * $FreeBSD$
- * $Id: if_igb_netmap.h 9802 2011-12-02 18:42:37Z luigi $
+ * $Id: if_igb_netmap.h 10627 2012-02-23 19:37:15Z luigi $
  *
- * netmap modifications for igb
- * contribured by Ahmed Kooli
+ * Netmap support for igb, partly contributed by Ahmed Kooli
+ * For details on netmap support please see ixgbe_netmap.h
  */
 
+
 #include <net/netmap.h>
 #include <sys/selinfo.h>
 #include <vm/vm.h>
 #include <vm/pmap.h>    /* vtophys ? */
 #include <dev/netmap/netmap_kern.h>
 
-static int	igb_netmap_reg(struct ifnet *, int onoff);
-static int	igb_netmap_txsync(struct ifnet *, u_int, int);
-static int	igb_netmap_rxsync(struct ifnet *, u_int, int);
-static void	igb_netmap_lock_wrapper(struct ifnet *, int, u_int);
-
-
-static void
-igb_netmap_attach(struct adapter *adapter)
-{
-	struct netmap_adapter na;
-
-	bzero(&na, sizeof(na));
-
-	na.ifp = adapter->ifp;
-	na.separate_locks = 1;
-	na.num_tx_desc = adapter->num_tx_desc;
-	na.num_rx_desc = adapter->num_rx_desc;
-	na.nm_txsync = igb_netmap_txsync;
-	na.nm_rxsync = igb_netmap_rxsync;
-	na.nm_lock = igb_netmap_lock_wrapper;
-	na.nm_register = igb_netmap_reg;
-	netmap_attach(&na, adapter->num_queues);
-}	
-
 
 /*
  * wrapper to export locks to the generic code
@@ -95,8 +72,7 @@ igb_netmap_lock_wrapper(struct ifnet *if
 
 
 /*
- * support for netmap register/unregisted. We are already under core lock.
- * only called on the first init or the last unregister.
+ * register-unregister routine
  */
 static int
 igb_netmap_reg(struct ifnet *ifp, int onoff)
@@ -106,7 +82,7 @@ igb_netmap_reg(struct ifnet *ifp, int on
 	int error = 0;
 
 	if (na == NULL)
-		return EINVAL;
+		return EINVAL;	/* no netmap support here */
 
 	igb_disable_intr(adapter);
 
@@ -116,7 +92,6 @@ igb_netmap_reg(struct ifnet *ifp, int on
 	if (onoff) {
 		ifp->if_capenable |= IFCAP_NETMAP;
 
-		/* save if_transmit to restore it later */
 		na->if_transmit = ifp->if_transmit;
 		ifp->if_transmit = netmap_start;
 
@@ -130,7 +105,7 @@ fail:
 		/* restore if_transmit */
 		ifp->if_transmit = na->if_transmit;
 		ifp->if_capenable &= ~IFCAP_NETMAP;
-		igb_init_locked(adapter);	/* also enables intr */
+		igb_init_locked(adapter);	/* also enable intr */
 	}
 	return (error);
 }
@@ -144,10 +119,10 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
-	struct netmap_adapter *na = NA(adapter->ifp);
+	struct netmap_adapter *na = NA(ifp);
 	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
+	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
 
 	/* generate an interrupt approximately every half ring */
 	int report_frequency = kring->nkr_num_slots >> 1;
@@ -161,31 +136,31 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 	    BUS_DMASYNC_POSTREAD);
 
-	/* update avail to what the hardware knows */
-	ring->avail = kring->nr_hwavail;
-
-	j = kring->nr_hwcur; /* netmap ring index */
+	/* check for new packets to send.
+	 * j indexes the netmap ring, l indexes the nic ring, and
+	 *      j = kring->nr_hwcur, l = E1000_TDT (not tracked),
+	 *      j == (l + kring->nkr_hwofs) % ring_size
+	 */
+	j = kring->nr_hwcur;
 	if (j != k) {	/* we have new packets to send */
-		u32 olinfo_status = 0;
-
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		if (l < 0)
-			l += lim + 1;
 		/* 82575 needs the queue index added */
-		if (adapter->hw.mac.type == e1000_82575)
-			olinfo_status |= txr->me << 4;
+		u32 olinfo_status =
+		    (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;
 
-		while (j != k) {
+		l = netmap_idx_k2n(kring, j);
+		for (n = 0; j != k; n++) {
+			/* slot is the current slot in the netmap ring */
 			struct netmap_slot *slot = &ring->slot[j];
-			struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
+			/* curr is the current slot in the nic ring */
 			union e1000_adv_tx_desc *curr =
 			    (union e1000_adv_tx_desc *)&txr->tx_base[l];
-			uint64_t paddr;
-			void *addr = PNMB(slot, &paddr);
+			struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
 			int flags = ((slot->flags & NS_REPORT) ||
 				j == 0 || j == report_frequency) ?
 					E1000_ADVTXD_DCMD_RS : 0;
-			int len = slot->len;
+			uint64_t paddr;
+			void *addr = PNMB(slot, &paddr);
+			u_int len = slot->len;
 
 			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
 				if (do_lock)
@@ -194,8 +169,13 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
 			}
 
 			slot->flags &= ~NS_REPORT;
-			// XXX do we need to set the address ?
+			if (slot->flags & NS_BUF_CHANGED) {
+				/* buffer has changed, reload map */
+				netmap_reload_map(txr->txtag, txbuf->map, addr);
+				slot->flags &= ~NS_BUF_CHANGED;
+			}
 			curr->read.buffer_addr = htole64(paddr);
+			// XXX check olinfo and cmd_type_len
 			curr->read.olinfo_status =
 			    htole32(olinfo_status |
 				(len<< E1000_ADVTXD_PAYLEN_SHIFT));
@@ -204,23 +184,14 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
 				    E1000_ADVTXD_DCMD_IFCS |
 				    E1000_ADVTXD_DCMD_DEXT |
 				    E1000_ADVTXD_DCMD_EOP | flags);
-			if (slot->flags & NS_BUF_CHANGED) {
-				/* buffer has changed, reload map */
-				netmap_reload_map(txr->txtag, txbuf->map, addr);
-				slot->flags &= ~NS_BUF_CHANGED;
-			}
 
 			bus_dmamap_sync(txr->txtag, txbuf->map,
 				BUS_DMASYNC_PREWRITE);
 			j = (j == lim) ? 0 : j + 1;
 			l = (l == lim) ? 0 : l + 1;
-			n++;
 		}
-		kring->nr_hwcur = k;
-
-		/* decrease avail by number of sent packets */
+		kring->nr_hwcur = k; /* the saved ring->cur */
 		kring->nr_hwavail -= n;
-		ring->avail = kring->nr_hwavail;
 
 		/* Set the watchdog XXX ? */
 		txr->queue_status = IGB_QUEUE_WORKING;
@@ -231,23 +202,28 @@ igb_netmap_txsync(struct ifnet *ifp, u_i
 
 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l);
 	}
+
 	if (n == 0 || kring->nr_hwavail < 1) {
 		int delta;
 
-		/* record completed transmission using TDH */
+		/* record completed transmissions using TDH */
 		l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
-		if (l >= kring->nkr_num_slots) /* XXX can it happen ? */
+		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
+			D("TDH wrap %d", l);
 			l -= kring->nkr_num_slots;
+		}
 		delta = l - txr->next_to_clean;
 		if (delta) {
-			/* new tx were completed */
+			/* some completed, increment hwavail. */
 			if (delta < 0)
 				delta += kring->nkr_num_slots;
 			txr->next_to_clean = l;
 			kring->nr_hwavail += delta;
-			ring->avail = kring->nr_hwavail;
 		}
 	}
+	/* update avail to what the kernel knows */
+	ring->avail = kring->nr_hwavail;
+
 	if (do_lock)
 		IGB_TX_UNLOCK(txr);
 	return 0;
@@ -262,10 +238,12 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
-	struct netmap_adapter *na = NA(adapter->ifp);
+	struct netmap_adapter *na = NA(ifp);
 	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n, lim = kring->nkr_num_slots - 1;
+	u_int j, l, n, lim = kring->nkr_num_slots - 1;
+	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+	u_int k = ring->cur, resvd = ring->reserved;
 
 	k = ring->cur;
 	if (k > lim)
@@ -274,45 +252,48 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
 	if (do_lock)
 		IGB_RX_LOCK(rxr);
 
-	/* Sync the ring. */
+	/* XXX check sync modes */
 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
+	/*
+	 * import newly received packets into the netmap ring.
+	 * j is an index in the netmap ring, l in the NIC ring.
+	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
-	for (n = 0; ; n++) {
-		union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
-		uint32_t staterr = le32toh(curr->wb.upper.status_error);
-
-		if ((staterr & E1000_RXD_STAT_DD) == 0)
-			break;
-		ring->slot[j].len = le16toh(curr->wb.upper.length);
-		
-		bus_dmamap_sync(rxr->ptag,
-			rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
-		j = (j == lim) ? 0 : j + 1;
-		l = (l == lim) ? 0 : l + 1;
-	}
-	if (n) {
-		rxr->next_to_check = l;
-		kring->nr_hwavail += n;
+	j = netmap_idx_n2k(kring, l);
+	if (netmap_no_pendintr || force_update) {
+		for (n = 0; ; n++) {
+			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
+			uint32_t staterr = le32toh(curr->wb.upper.status_error);
+
+			if ((staterr & E1000_RXD_STAT_DD) == 0)
+				break;
+			ring->slot[j].len = le16toh(curr->wb.upper.length);
+			bus_dmamap_sync(rxr->ptag,
+				rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
+			j = (j == lim) ? 0 : j + 1;
+			l = (l == lim) ? 0 : l + 1;
+		}
+		if (n) { /* update the state variables */
+			rxr->next_to_check = l;
+			kring->nr_hwavail += n;
+		}
+		kring->nr_kflags &= ~NKR_PENDINTR;
 	}
 
-	/* skip past packets that userspace has already processed,
-	 * making them available for reception.
-	 * advance nr_hwcur and issue a bus_dmamap_sync on the
-	 * buffers so it is safe to write to them.
-	 * Also increase nr_hwavail
-	 */
-	j = kring->nr_hwcur;
-	l = kring->nr_hwcur - kring->nkr_hwofs;
-	if (l < 0)
-		l += lim + 1;
-	if (j != k) {	/* userspace has read some packets. */
-		n = 0;
-		while (j != k) {
+	/* skip past packets that userspace has released */
+        j = kring->nr_hwcur;    /* netmap ring index */
+	if (resvd > 0) {
+		if (resvd + ring->avail >= lim + 1) {
+			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
+			ring->reserved = resvd = 0; // XXX panic...
+		}
+		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
+	}
+	if (j != k) { /* userspace has released some packets. */
+		l = netmap_idx_k2n(kring, j);
+		for (n = 0; j != k; n++) {
 			struct netmap_slot *slot = ring->slot + j;
 			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
 			struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
@@ -325,33 +306,51 @@ igb_netmap_rxsync(struct ifnet *ifp, u_i
 				return netmap_ring_reinit(kring);
 			}
 
-			curr->wb.upper.status_error = 0;
-			curr->read.pkt_addr = htole64(paddr);
 			if (slot->flags & NS_BUF_CHANGED) {
 				netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}
-
+			curr->read.pkt_addr = htole64(paddr);
+			curr->wb.upper.status_error = 0;
 			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 				BUS_DMASYNC_PREREAD);
-
 			j = (j == lim) ? 0 : j + 1;
 			l = (l == lim) ? 0 : l + 1;
-			n++;
 		}
 		kring->nr_hwavail -= n;
-		kring->nr_hwcur = ring->cur;
+		kring->nr_hwcur = k;
 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-		/* IMPORTANT: we must leave one free slot in the ring,
+		/*
+		 * IMPORTANT: we must leave one free slot in the ring,
 		 * so move l back by one unit
 		 */
 		l = (l == 0) ? lim : l - 1;
 		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
 	}
 	/* tell userspace that there are new packets */
-	ring->avail = kring->nr_hwavail ;
+	ring->avail = kring->nr_hwavail - resvd;
 	if (do_lock)
 		IGB_RX_UNLOCK(rxr);
 	return 0;
 }
+
+
+static void
+igb_netmap_attach(struct adapter *adapter)
+{
+	struct netmap_adapter na;
+
+	bzero(&na, sizeof(na));
+
+	na.ifp = adapter->ifp;
+	na.separate_locks = 1;
+	na.num_tx_desc = adapter->num_tx_desc;
+	na.num_rx_desc = adapter->num_rx_desc;
+	na.nm_txsync = igb_netmap_txsync;
+	na.nm_rxsync = igb_netmap_rxsync;
+	na.nm_lock = igb_netmap_lock_wrapper;
+	na.nm_register = igb_netmap_reg;
+	netmap_attach(&na, adapter->num_queues);
+}	
+/* end of file */
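
Both rxsync routines in this commit also start honoring ring->reserved, i.e. slots that userspace has consumed (cur already advanced past them) but whose buffers it still references. The two helpers below are hypothetical (rx_released_up_to/rx_avail_to_export are not part of the commit); they only restate the arithmetic added in the hunks above.

#include <sys/types.h>

/* step ring->cur back by the reserved count on a ring of lim+1 slots;
 * the kernel must not refill or re-report those slots */
static u_int
rx_released_up_to(u_int cur, u_int resvd, u_int lim)
{
	return ((cur >= resvd) ? cur - resvd : cur + lim + 1 - resvd);
}

/* what rxsync exports in ring->avail once reserved slots are deducted,
 * matching "ring->avail = kring->nr_hwavail - resvd" above */
static u_int
rx_avail_to_export(u_int hwavail, u_int resvd)
{
	return (hwavail - resvd);
}

The sanity check in the diff (resvd + ring->avail >= lim + 1) catches a userspace that tries to reserve more than a full ring, and clears the reservation instead of panicking.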

Modified: stable/9/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- stable/9/sys/dev/netmap/if_lem_netmap.h	Thu May 17 14:37:01 2012	(r235548)
+++ stable/9/sys/dev/netmap/if_lem_netmap.h	Thu May 17 15:02:51 2012	(r235549)
@@ -23,14 +23,14 @@
  * SUCH DAMAGE.
  */
 
+
 /*
  * $FreeBSD$
- * $Id: if_lem_netmap.h 9802 2011-12-02 18:42:37Z luigi $
+ * $Id: if_lem_netmap.h 10627 2012-02-23 19:37:15Z luigi $
  *
- * netmap support for if_lem.c
+ * netmap support for "lem"
  *
- * For structure and details on the individual functions please see
- * ixgbe_netmap.h
+ * For details on netmap support please see ixgbe_netmap.h
  */
 
 #include <net/netmap.h>
@@ -39,32 +39,6 @@
 #include <vm/pmap.h>    /* vtophys ? */
 #include <dev/netmap/netmap_kern.h>
 
-static int	lem_netmap_reg(struct ifnet *, int onoff);
-static int	lem_netmap_txsync(struct ifnet *, u_int, int);
-static int	lem_netmap_rxsync(struct ifnet *, u_int, int);
-static void	lem_netmap_lock_wrapper(struct ifnet *, int, u_int);
-
-
-SYSCTL_NODE(_dev, OID_AUTO, lem, CTLFLAG_RW, 0, "lem card");
-
-static void
-lem_netmap_attach(struct adapter *adapter)
-{
-	struct netmap_adapter na;
-
-	bzero(&na, sizeof(na));
-
-	na.ifp = adapter->ifp;
-	na.separate_locks = 1;
-	na.num_tx_desc = adapter->num_tx_desc;
-	na.num_rx_desc = adapter->num_rx_desc;
-	na.nm_txsync = lem_netmap_txsync;
-	na.nm_rxsync = lem_netmap_rxsync;
-	na.nm_lock = lem_netmap_lock_wrapper;
-	na.nm_register = lem_netmap_reg;
-	netmap_attach(&na, 1);
-}
-
 
 static void
 lem_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int ringid)
@@ -96,7 +70,7 @@ lem_netmap_lock_wrapper(struct ifnet *if
 
 
 /*
- * Register/unregister routine
+ * Register/unregister
  */
 static int
 lem_netmap_reg(struct ifnet *ifp, int onoff)
@@ -113,7 +87,6 @@ lem_netmap_reg(struct ifnet *ifp, int on
 	/* Tell the stack that the interface is no longer active */
 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 
-	/* lem_netmap_block_tasks(adapter); */
 #ifndef EM_LEGACY_IRQ // XXX do we need this ?
 	taskqueue_block(adapter->tq);
 	taskqueue_drain(adapter->tq, &adapter->rxtx_task);
@@ -122,9 +95,6 @@ lem_netmap_reg(struct ifnet *ifp, int on
 	if (onoff) {
 		ifp->if_capenable |= IFCAP_NETMAP;
 
-		/* save if_transmit to restore it when exiting.
-		 * XXX what about if_start and if_qflush ?
-		 */
 		na->if_transmit = ifp->if_transmit;
 		ifp->if_transmit = netmap_start;
 
@@ -135,10 +105,10 @@ lem_netmap_reg(struct ifnet *ifp, int on
 		}
 	} else {
 fail:
-		/* restore non-netmap mode */
+		/* return to non-netmap mode */
 		ifp->if_transmit = na->if_transmit;
 		ifp->if_capenable &= ~IFCAP_NETMAP;
-		lem_init_locked(adapter);	/* also enables intr */
+		lem_init_locked(adapter);	/* also enable intr */
 	}
 
 #ifndef EM_LEGACY_IRQ
@@ -156,14 +126,15 @@ static int
 lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
-	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->tx_rings[0];
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
+	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
 
 	/* generate an interrupt approximately every half ring */
 	int report_frequency = kring->nkr_num_slots >> 1;
 
+	/* take a copy of ring->cur now, and never read it again */
 	k = ring->cur;
 	if (k > lim)
 		return netmap_ring_reinit(kring);
@@ -172,25 +143,25 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
 		EM_TX_LOCK(adapter);
 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 			BUS_DMASYNC_POSTREAD);
-
-	/* update avail to what the hardware knows */
-	ring->avail = kring->nr_hwavail;
-
-	j = kring->nr_hwcur; /* points into the netmap ring */
+	/*
+	 * Process new packets to send. j is the current index in the
+	 * netmap ring, l is the corresponding index in the NIC ring.
+	 */
+	j = kring->nr_hwcur;
 	if (j != k) {	/* we have new packets to send */
-		l = j - kring->nkr_hwofs; /* points into the NIC ring */
-		if (l < 0)
-			l += lim + 1;
-		while (j != k) {
+		l = netmap_idx_k2n(kring, j);
+		for (n = 0; j != k; n++) {
+			/* slot is the current slot in the netmap ring */
 			struct netmap_slot *slot = &ring->slot[j];
+			/* curr is the current slot in the nic ring */
 			struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
 			struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
-			uint64_t paddr;
-			void *addr = PNMB(slot, &paddr);
 			int flags = ((slot->flags & NS_REPORT) ||
 				j == 0 || j == report_frequency) ?
 					E1000_TXD_CMD_RS : 0;
-			int len = slot->len;
+			uint64_t paddr;
+			void *addr = PNMB(slot, &paddr);
+			u_int len = slot->len;
 
 			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
 				if (do_lock)
@@ -199,28 +170,24 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
 			}
 
 			slot->flags &= ~NS_REPORT;
-			curr->upper.data = 0;
-			curr->lower.data =
-			    htole32( adapter->txd_cmd | len |
-				(E1000_TXD_CMD_EOP | flags) );
 			if (slot->flags & NS_BUF_CHANGED) {
-				curr->buffer_addr = htole64(paddr);
 				/* buffer has changed, reload map */
 				netmap_reload_map(adapter->txtag, txbuf->map, addr);
+				curr->buffer_addr = htole64(paddr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}
+			curr->upper.data = 0;
+			curr->lower.data =
+			    htole32( adapter->txd_cmd | len |
+				(E1000_TXD_CMD_EOP | flags) );
 
 			bus_dmamap_sync(adapter->txtag, txbuf->map,
 			    BUS_DMASYNC_PREWRITE);
 			j = (j == lim) ? 0 : j + 1;
 			l = (l == lim) ? 0 : l + 1;
-			n++;
 		}
-		kring->nr_hwcur = k;
-
-		/* decrease avail by number of sent packets */
+		kring->nr_hwcur = k; /* the saved ring->cur */
 		kring->nr_hwavail -= n;
-		ring->avail = kring->nr_hwavail;
 
 		bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -233,19 +200,21 @@ lem_netmap_txsync(struct ifnet *ifp, u_i
 
 		/* record completed transmissions using TDH */
 		l = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
-		if (l >= kring->nkr_num_slots) { /* can it happen ? */
+		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
 			D("bad TDH %d", l);
 			l -= kring->nkr_num_slots;
 		}
 		delta = l - adapter->next_tx_to_clean;
 		if (delta) {
+			/* some tx completed, increment hwavail. */
 			if (delta < 0)
 				delta += kring->nkr_num_slots;
 			adapter->next_tx_to_clean = l;
 			kring->nr_hwavail += delta;
-			ring->avail = kring->nr_hwavail;
 		}
 	}
+	/* update avail to what the kernel knows */
+	ring->avail = kring->nr_hwavail;
 
 	if (do_lock)
 		EM_TX_UNLOCK(adapter);
@@ -260,57 +229,68 @@ static int
 lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
-	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->rx_rings[0];
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
-	int j, k, l, n, lim = kring->nkr_num_slots - 1;
+	int j, l, n, lim = kring->nkr_num_slots - 1;
+	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+	u_int k = ring->cur, resvd = ring->reserved;
 
-	k = ring->cur;
 	if (k > lim)
 		return netmap_ring_reinit(kring);
 
 	if (do_lock)
 		EM_RX_LOCK(adapter);
+
 	/* XXX check sync modes */
 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
-	/* import newly received packets into the netmap ring */
-	l = adapter->next_rx_desc_to_check; /* points into the NIC ring */
-	j = l + kring->nkr_hwofs; /* points into the netmap ring */
-	if (j > lim)
-		j -= lim + 1;
-	for (n = 0; ; n++) {
-		struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
-		int len;
-
-		if ((curr->status & E1000_RXD_STAT_DD) == 0)
-			break;
-		len = le16toh(curr->length) - 4; // CRC
-
-		if (len < 0) {
-			D("bogus pkt size at %d", j);
-			len = 0;
+	/*
+	 * Import newly received packets into the netmap ring.
+	 * j is an index in the netmap ring, l in the NIC ring.
+	 */
+	l = adapter->next_rx_desc_to_check;
+	j = netmap_idx_n2k(kring, l);
+	if (netmap_no_pendintr || force_update) {
+		for (n = 0; ; n++) {
+			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
+			uint32_t staterr = le32toh(curr->status);
+			int len;
+
+			if ((staterr & E1000_RXD_STAT_DD) == 0)
+				break;
+			len = le16toh(curr->length) - 4; // CRC
+			if (len < 0) {
+				D("bogus pkt size at %d", j);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
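
For context, here is a hedged sketch of the userspace side of the cur/avail/reserved protocol that the rxsync changes above implement, in the spirit of the programs under tools/tools/netmap (whose actual sources fall in the truncated part of the diff). consume_rx_ring, lim and hold are illustrative names; lim is assumed to be ring->num_slots - 1, hold is assumed not to exceed the number of slots just consumed, and opening/mmap()ing the ring via NIOCREGIF is omitted.

#include <sys/types.h>
#include <sys/time.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

/* drain an already-opened RX ring, keeping the last 'hold' buffers */
static void
consume_rx_ring(struct netmap_ring *ring, u_int lim, u_int hold)
{
	u_int cur = ring->cur;

	while (ring->avail > 0) {
		struct netmap_slot *slot = &ring->slot[cur];

		/* real code would process slot->len bytes of payload here */
		(void)slot;
		cur = (cur == lim) ? 0 : cur + 1;
		ring->avail--;
	}
	/*
	 * Buffers the application still references are advertised in
	 * ring->reserved: on the next rxsync the kernel steps cur back by
	 * that amount and exports avail = nr_hwavail - reserved, as in the
	 * driver hunks above.
	 */
	ring->reserved += hold;
	ring->cur = cur;
}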


