Date:      Tue, 29 Jan 2019 10:28:51 +0000 (UTC)
From:      Vincenzo Maffione <vmaffione@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r343549 - head/sys/dev/netmap
Message-ID:  <201901291028.x0TASpgu079438@repo.freebsd.org>

Author: vmaffione
Date: Tue Jan 29 10:28:50 2019
New Revision: 343549
URL: https://svnweb.freebsd.org/changeset/base/343549

Log:
  netmap: add notifications on kloop stop
  
  On sync-kloop stop, send a wake-up notification to the kloop so that
  it does not have to wait for its timeout to expire before terminating.
  Also, improve logging in netmap_freebsd.c.
  
  MFC after:	3 days
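
For reference, the new stop path in netmap_kloop.c (last hunk of this
message) follows a set-flag / wake-up / wait-for-exit pattern. Below is a
minimal userspace sketch of that pattern, with pthread condition variables
standing in for the kernel primitives (NMG_LOCK, nm_os_selwakeup,
schedule_timeout); the helper names and the pthread-based waiting are
illustrative only, not the actual netmap code.

/*
 * Sketch: a loop thread that sleeps with a timeout, and a stop routine
 * that sets a stopping flag, wakes the loop immediately, and waits for
 * it to actually terminate (cf. netmap_sync_kloop_stop()).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static bool stopping = false;	/* plays the role of NM_SYNC_KLOOP_STOPPING */
static bool running  = true;

static void *
kloop(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!stopping) {
		struct timespec ts;

		/* ... TX/RX ring processing would go here ... */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 3;	/* cf. the 3000 ms schedule_timeout() */
		/* Sleep until woken up or until the timeout expires. */
		pthread_cond_timedwait(&cv, &lock, &ts);
	}
	running = false;		/* tell the stopper we are done */
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void
kloop_stop(void)
{
	pthread_mutex_lock(&lock);
	stopping = true;		/* set the stopping flag ... */
	pthread_cond_broadcast(&cv);	/* ... and wake the loop up now */
	while (running)			/* wait for actual termination */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, kloop, NULL);
	sleep(1);
	kloop_stop();		/* returns promptly: no timeout wait */
	pthread_join(t, NULL);
	printf("kloop stopped\n");
	return 0;
}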

Modified:
  head/sys/dev/netmap/netmap_bdg.c
  head/sys/dev/netmap/netmap_freebsd.c
  head/sys/dev/netmap/netmap_kloop.c

Modified: head/sys/dev/netmap/netmap_bdg.c
==============================================================================
--- head/sys/dev/netmap/netmap_bdg.c	Tue Jan 29 10:21:41 2019	(r343548)
+++ head/sys/dev/netmap/netmap_bdg.c	Tue Jan 29 10:28:50 2019	(r343549)
@@ -1141,8 +1141,8 @@ netmap_bwrap_intr_notify(struct netmap_kring *kring, i
 		goto put_out;
 	if (kring->nr_hwcur == kring->nr_hwtail) {
 		if (netmap_verbose)
-			nm_prerr("how strange, interrupt with no packets on %s",
-			    na->name);
+			nm_prlim(1, "interrupt with no packets on %s",
+				kring->name);
 		goto put_out;
 	}
 

Modified: head/sys/dev/netmap/netmap_freebsd.c
==============================================================================
--- head/sys/dev/netmap/netmap_freebsd.c	Tue Jan 29 10:21:41 2019	(r343548)
+++ head/sys/dev/netmap/netmap_freebsd.c	Tue Jan 29 10:28:50 2019	(r343549)
@@ -240,7 +240,7 @@ nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *dat
 	static int notsupported = 0;
 	if (!notsupported) {
 		notsupported = 1;
-		D("inet4 segmentation not supported");
+		nm_prerr("inet4 segmentation not supported");
 	}
 #endif
 }
@@ -256,7 +256,7 @@ nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *
 	static int notsupported = 0;
 	if (!notsupported) {
 		notsupported = 1;
-		D("inet6 segmentation not supported");
+		nm_prerr("inet6 segmentation not supported");
 	}
 #endif
 }
@@ -288,8 +288,9 @@ freebsd_generic_rx_handler(struct ifnet *ifp, struct m
 {
 	int stolen;
 
-	if (!NM_NA_VALID(ifp)) {
-		RD(1, "Warning: got RX packet for invalid emulated adapter");
+	if (unlikely(!NM_NA_VALID(ifp))) {
+		nm_prlim(1, "Warning: RX packet intercepted, but no"
+				" emulated adapter");
 		return;
 	}
 
@@ -315,15 +316,16 @@ nm_os_catch_rx(struct netmap_generic_adapter *gna, int
 	nm_os_ifnet_lock();
 	if (intercept) {
 		if (gna->save_if_input) {
-			D("cannot intercept again");
-			ret = EINVAL; /* already set */
+			nm_prerr("RX on %s already intercepted", na->name);
+			ret = EBUSY; /* already set */
 			goto out;
 		}
 		gna->save_if_input = ifp->if_input;
 		ifp->if_input = freebsd_generic_rx_handler;
 	} else {
-		if (!gna->save_if_input){
-			D("cannot restore");
+		if (!gna->save_if_input) {
+			nm_prerr("Failed to undo RX intercept on %s",
+				na->name);
 			ret = EINVAL;  /* not saved */
 			goto out;
 		}
@@ -392,11 +394,11 @@ nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
 	 * we need to copy from the cluster to the netmap buffer.
 	 */
 	if (MBUF_REFCNT(m) != 1) {
-		D("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
+		nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
 		panic("in generic_xmit_frame");
 	}
 	if (m->m_ext.ext_size < len) {
-		RD(5, "size %d < len %d", m->m_ext.ext_size, len);
+		nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len);
 		len = m->m_ext.ext_size;
 	}
 	bcopy(a->addr, m->m_data, len);
@@ -459,7 +461,6 @@ nm_os_generic_set_features(struct netmap_generic_adapt
 void
 nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
 {
-	ND("called");
 	mit->mit_pending = 0;
 	mit->mit_ring_idx = idx;
 	mit->mit_na = na;
@@ -469,21 +470,19 @@ nm_os_mitigation_init(struct nm_generic_mit *mit, int 
 void
 nm_os_mitigation_start(struct nm_generic_mit *mit)
 {
-	ND("called");
 }
 
 
 void
 nm_os_mitigation_restart(struct nm_generic_mit *mit)
 {
-	ND("called");
 }
 
 
 int
 nm_os_mitigation_active(struct nm_generic_mit *mit)
 {
-	ND("called");
+
 	return 0;
 }
 
@@ -491,12 +490,12 @@ nm_os_mitigation_active(struct nm_generic_mit *mit)
 void
 nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
 {
-	ND("called");
 }
 
 static int
 nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
 {
+
 	return EINVAL;
 }
 
@@ -559,7 +558,7 @@ nm_vi_free_index(uint8_t val)
 		}
 	}
 	if (lim == nm_vi_indices.active)
-		D("funny, index %u didn't found", val);
+		nm_prerr("Index %u not found", val);
 	mtx_unlock(&nm_vi_indices.lock);
 }
 #undef NM_VI_MAX
@@ -597,7 +596,7 @@ nm_os_vi_persist(const char *name, struct ifnet **ret)
 
 	ifp = if_alloc(IFT_ETHER);
 	if (ifp == NULL) {
-		D("if_alloc failed");
+		nm_prerr("if_alloc failed");
 		return ENOMEM;
 	}
 	if_initname(ifp, name, IF_DUNIT_NONE);
@@ -638,7 +637,7 @@ struct nm_os_extmem {
 void
 nm_os_extmem_delete(struct nm_os_extmem *e)
 {
-	D("freeing %zx bytes", (size_t)e->size);
+	nm_prinf("freeing %zx bytes", (size_t)e->size);
 	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
 	nm_os_free(e);
 }
@@ -688,7 +687,7 @@ nm_os_extmem_create(unsigned long p, struct nmreq_pool
 	rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
 			&obj, &index, &prot, &wired);
 	if (rv != KERN_SUCCESS) {
-		D("address %lx not found", p);
+		nm_prerr("address %lx not found", p);
 		goto out_free;
 	}
 	/* check that we are given the whole vm_object ? */
@@ -707,13 +706,13 @@ nm_os_extmem_create(unsigned long p, struct nmreq_pool
 			VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 			VM_PROT_READ | VM_PROT_WRITE, 0);
 	if (rv != KERN_SUCCESS) {
-		D("vm_map_find(%zx) failed", (size_t)e->size);
+		nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
 		goto out_rel;
 	}
 	rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
 			VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
 	if (rv != KERN_SUCCESS) {
-		D("vm_map_wire failed");
+		nm_prerr("vm_map_wire failed");
 		goto out_rem;
 	}
 
@@ -795,7 +794,7 @@ nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev,
 {
 	int rid;
 
-	D("ptn_memdev_driver iomap");
+	nm_prinf("ptn_memdev_driver iomap");
 
 	rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
 	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
@@ -814,7 +813,7 @@ nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev,
 	*nm_paddr = rman_get_start(ptn_dev->pci_mem);
 	*nm_addr = rman_get_virtual(ptn_dev->pci_mem);
 
-	D("=== BAR %d start %lx len %lx mem_size %lx ===",
+	nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
 			PTNETMAP_MEM_PCI_BAR,
 			(unsigned long)(*nm_paddr),
 			(unsigned long)rman_get_size(ptn_dev->pci_mem),
@@ -832,7 +831,7 @@ nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev
 void
 nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
 {
-	D("ptn_memdev_driver iounmap");
+	nm_prinf("ptn_memdev_driver iounmap");
 
 	if (ptn_dev->pci_mem) {
 		bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
@@ -868,8 +867,6 @@ ptn_memdev_attach(device_t dev)
 	int rid;
 	uint16_t mem_id;
 
-	D("ptn_memdev_driver attach");
-
 	ptn_dev = device_get_softc(dev);
 	ptn_dev->dev = dev;
 
@@ -893,7 +890,7 @@ ptn_memdev_attach(device_t dev)
 	}
 	netmap_mem_get(ptn_dev->nm_mem);
 
-	D("ptn_memdev_driver probe OK - host_mem_id: %d", mem_id);
+	nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);
 
 	return (0);
 }
@@ -904,10 +901,11 @@ ptn_memdev_detach(device_t dev)
 {
 	struct ptnetmap_memdev *ptn_dev;
 
-	D("ptn_memdev_driver detach");
 	ptn_dev = device_get_softc(dev);
 
 	if (ptn_dev->nm_mem) {
+		nm_prinf("ptnetmap memdev detached, host memid %u",
+			netmap_mem_get_id(ptn_dev->nm_mem));
 		netmap_mem_put(ptn_dev->nm_mem);
 		ptn_dev->nm_mem = NULL;
 	}
@@ -928,7 +926,6 @@ ptn_memdev_detach(device_t dev)
 static int
 ptn_memdev_shutdown(device_t dev)
 {
-	D("ptn_memdev_driver shutdown");
 	return bus_generic_shutdown(dev);
 }
 
@@ -953,7 +950,7 @@ netmap_dev_pager_ctor(void *handle, vm_ooffset_t size,
 	struct netmap_vm_handle_t *vmh = handle;
 
 	if (netmap_verbose)
-		D("handle %p size %jd prot %d foff %jd",
+		nm_prinf("handle %p size %jd prot %d foff %jd",
 			handle, (intmax_t)size, prot, (intmax_t)foff);
 	if (color)
 		*color = 0;
@@ -970,7 +967,7 @@ netmap_dev_pager_dtor(void *handle)
 	struct netmap_priv_d *priv = vmh->priv;
 
 	if (netmap_verbose)
-		D("handle %p", handle);
+		nm_prinf("handle %p", handle);
 	netmap_dtor(priv);
 	free(vmh, M_DEVBUF);
 	dev_rel(dev);
@@ -989,7 +986,7 @@ netmap_dev_pager_fault(vm_object_t object, vm_ooffset_
 	vm_memattr_t memattr;
 	vm_pindex_t pidx;
 
-	ND("object %p offset %jd prot %d mres %p",
+	nm_prdis("object %p offset %jd prot %d mres %p",
 			object, (intmax_t)offset, prot, mres);
 	memattr = object->memattr;
 	pidx = OFF_TO_IDX(offset);
@@ -1045,7 +1042,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *fo
 	vm_object_t obj;
 
 	if (netmap_verbose)
-		D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
+		nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
 		    (intmax_t )*foff, (intmax_t )objsize, objp, prot);
 
 	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
@@ -1070,7 +1067,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *fo
 		&netmap_cdev_pager_ops, objsize, prot,
 		*foff, NULL);
 	if (obj == NULL) {
-		D("cdev_pager_allocate failed");
+		nm_prerr("cdev_pager_allocate failed");
 		error = EINVAL;
 		goto err_deref;
 	}
@@ -1104,7 +1101,7 @@ static int
 netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
 {
 	if (netmap_verbose)
-		D("dev %p fflag 0x%x devtype %d td %p",
+		nm_prinf("dev %p fflag 0x%x devtype %d td %p",
 			dev, fflag, devtype, td);
 	return 0;
 }
@@ -1255,11 +1252,11 @@ nm_os_kctx_worker_start(struct nm_kctx *nmk)
 		goto err;
 	}
 
-	D("nm_kthread started td %p", nmk->worker);
+	nm_prinf("nm_kthread started td %p", nmk->worker);
 
 	return 0;
 err:
-	D("nm_kthread start failed err %d", error);
+	nm_prerr("nm_kthread start failed err %d", error);
 	nmk->worker = NULL;
 	return error;
 }
@@ -1337,7 +1334,7 @@ netmap_knrdetach(struct knote *kn)
 	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
 	struct selinfo *si = &priv->np_si[NR_RX]->si;
 
-	D("remove selinfo %p", si);
+	nm_prinf("remove selinfo %p", si);
 	knlist_remove(&si->si_note, kn, /*islocked=*/0);
 }
 
@@ -1347,7 +1344,7 @@ netmap_knwdetach(struct knote *kn)
 	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
 	struct selinfo *si = &priv->np_si[NR_TX]->si;
 
-	D("remove selinfo %p", si);
+	nm_prinf("remove selinfo %p", si);
 	knlist_remove(&si->si_note, kn, /*islocked=*/0);
 }
 
@@ -1418,17 +1415,17 @@ netmap_kqfilter(struct cdev *dev, struct knote *kn)
 	int ev = kn->kn_filter;
 
 	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
-		D("bad filter request %d", ev);
+		nm_prerr("bad filter request %d", ev);
 		return 1;
 	}
 	error = devfs_get_cdevpriv((void**)&priv);
 	if (error) {
-		D("device not yet setup");
+		nm_prerr("device not yet setup");
 		return 1;
 	}
 	na = priv->np_na;
 	if (na == NULL) {
-		D("no netmap adapter for this file descriptor");
+		nm_prerr("no netmap adapter for this file descriptor");
 		return 1;
 	}
 	/* the si is indicated in the priv */
@@ -1535,7 +1532,7 @@ netmap_loader(__unused struct module *module, int even
 		 * then the module can not be unloaded.
 		 */
 		if (netmap_use_count) {
-			D("netmap module can not be unloaded - netmap_use_count: %d",
+			nm_prerr("netmap module can not be unloaded - netmap_use_count: %d",
 					netmap_use_count);
 			error = EBUSY;
 			break;

Modified: head/sys/dev/netmap/netmap_kloop.c
==============================================================================
--- head/sys/dev/netmap/netmap_kloop.c	Tue Jan 29 10:21:41 2019	(r343548)
+++ head/sys/dev/netmap/netmap_kloop.c	Tue Jan 29 10:28:50 2019	(r343549)
@@ -567,16 +567,12 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct n
 		/* Poll for notifications coming from the netmap rings bound to
 		 * this file descriptor. */
 		{
-			NM_SELINFO_T *si[NR_TXRX];
-
 			NMG_LOCK();
-			si[NR_RX] = nm_si_user(priv, NR_RX) ? &na->si[NR_RX] :
-				&na->rx_rings[priv->np_qfirst[NR_RX]]->si;
-			si[NR_TX] = nm_si_user(priv, NR_TX) ? &na->si[NR_TX] :
-				&na->tx_rings[priv->np_qfirst[NR_TX]]->si;
+			poll_wait(priv->np_filp, priv->np_si[NR_TX],
+			    &poll_ctx->wait_table);
+			poll_wait(priv->np_filp, priv->np_si[NR_RX],
+			    &poll_ctx->wait_table);
 			NMG_UNLOCK();
-			poll_wait(priv->np_filp, si[NR_TX], &poll_ctx->wait_table);
-			poll_wait(priv->np_filp, si[NR_RX], &poll_ctx->wait_table);
 		}
 #else   /* SYNC_KLOOP_POLL */
 		opt->nro_status = EOPNOTSUPP;
@@ -657,7 +653,7 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct n
 			/* If a poll context is present, yield to the scheduler
 			 * waiting for a notification to come either from
 			 * netmap or the application. */
-			schedule_timeout(msecs_to_jiffies(20000));
+			schedule_timeout(msecs_to_jiffies(3000));
 		} else
 #endif /* SYNC_KLOOP_POLL */
 		{
@@ -708,12 +704,31 @@ out:
 int
 netmap_sync_kloop_stop(struct netmap_priv_d *priv)
 {
+	struct netmap_adapter *na;
 	bool running = true;
 	int err = 0;
 
+	if (priv->np_nifp == NULL) {
+		return ENXIO;
+	}
+	mb(); /* make sure following reads are not from cache */
+
+	na = priv->np_na;
+	if (!nm_netmap_on(na)) {
+		return ENXIO;
+	}
+
+	/* Set the kloop stopping flag. */
 	NMG_LOCK();
 	priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
 	NMG_UNLOCK();
+
+	/* Send a notification to the kloop, in case it is blocked in
+	 * schedule_timeout(). We can use either RX or TX, because the
+	 * kloop is waiting on both. */
+	nm_os_selwakeup(priv->np_si[NR_RX]);
+
+	/* Wait for the kloop to actually terminate. */
 	while (running) {
 		usleep_range(1000, 1500);
 		NMG_LOCK();


