Date: Tue, 11 Dec 2018 11:31:14 +0000 (UTC)
From: Vincenzo Maffione <vmaffione@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r341815 - in stable/12/sys: conf dev/netmap modules/netmap net
Message-ID: <201812111131.wBBBVEo7015945@repo.freebsd.org>
Author: vmaffione
Date: Tue Dec 11 11:31:13 2018
New Revision: 341815
URL: https://svnweb.freebsd.org/changeset/base/341815

Log:
  MFC r341516, r341589

  netmap: align codebase to the current upstream (760279cfb2730a585)

  Changelist:
  - Replace netmap passthrough host support with a more general
    mechanism to call TXSYNC/RXSYNC from an in-kernel event-loop.
    No kernel threads are needed to use this feature: the application
    is required to spawn a thread (or a process) and issue a
    SYNC_KLOOP_START (NIOCCTRL) command in the thread body. The kernel
    loop is executed by the ioctl implementation, which returns to
    userspace only when a different thread calls SYNC_KLOOP_STOP or
    the netmap file descriptor is closed.
  - Update the if_ptnet driver to cope with the new data structures,
    and prune all the obsolete ptnetmap code.
  - Add support for "null" netmap ports, useful to allocate netmap_if,
    netmap_ring and netmap buffers to be used by specialized
    applications (e.g. hypervisors). TXSYNC/RXSYNC on these ports
    have no effect.
  - Various fixes and code refactoring.

  Sponsored by:	Sunny Valley Networks
  Differential Revision:	https://reviews.freebsd.org/D18015

Added:
  stable/12/sys/dev/netmap/netmap_kloop.c
     - copied unchanged from r341516, head/sys/dev/netmap/netmap_kloop.c
  stable/12/sys/dev/netmap/netmap_null.c
     - copied unchanged from r341516, head/sys/dev/netmap/netmap_null.c
Modified:
  stable/12/sys/conf/files
  stable/12/sys/dev/netmap/if_ixl_netmap.h
  stable/12/sys/dev/netmap/if_ptnet.c
  stable/12/sys/dev/netmap/if_vtnet_netmap.h
  stable/12/sys/dev/netmap/netmap.c
  stable/12/sys/dev/netmap/netmap_bdg.c
  stable/12/sys/dev/netmap/netmap_bdg.h
  stable/12/sys/dev/netmap/netmap_freebsd.c
  stable/12/sys/dev/netmap/netmap_generic.c
  stable/12/sys/dev/netmap/netmap_kern.h
  stable/12/sys/dev/netmap/netmap_legacy.c
  stable/12/sys/dev/netmap/netmap_mem2.c
  stable/12/sys/dev/netmap/netmap_mem2.h
  stable/12/sys/dev/netmap/netmap_pipe.c
  stable/12/sys/dev/netmap/netmap_vale.c
  stable/12/sys/modules/netmap/Makefile
  stable/12/sys/net/netmap.h
  stable/12/sys/net/netmap_user.h
  stable/12/sys/net/netmap_virt.h
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/conf/files
==============================================================================
--- stable/12/sys/conf/files	Tue Dec 11 11:13:11 2018	(r341814)
+++ stable/12/sys/conf/files	Tue Dec 11 11:31:13 2018	(r341815)
@@ -2534,17 +2534,19 @@ dev/ncv/ncr53c500.c		optional ncv
 dev/ncv/ncr53c500_pccard.c	optional ncv pccard
 dev/netmap/if_ptnet.c		optional netmap inet
 dev/netmap/netmap.c		optional netmap
+dev/netmap/netmap_bdg.c		optional netmap
 dev/netmap/netmap_freebsd.c	optional netmap
 dev/netmap/netmap_generic.c	optional netmap
+dev/netmap/netmap_kloop.c	optional netmap
+dev/netmap/netmap_legacy.c	optional netmap
 dev/netmap/netmap_mbq.c		optional netmap
 dev/netmap/netmap_mem2.c	optional netmap
 dev/netmap/netmap_monitor.c	optional netmap
+dev/netmap/netmap_null.c	optional netmap
 dev/netmap/netmap_offloadings.c	optional netmap
 dev/netmap/netmap_pipe.c	optional netmap
 dev/netmap/netmap_pt.c		optional netmap
 dev/netmap/netmap_vale.c	optional netmap
-dev/netmap/netmap_legacy.c	optional netmap
-dev/netmap/netmap_bdg.c	optional netmap
 # compile-with "${NORMAL_C} -Wconversion -Wextra"
 dev/nfsmb/nfsmb.c		optional nfsmb pci
 dev/nge/if_nge.c		optional nge

Modified: stable/12/sys/dev/netmap/if_ixl_netmap.h
==============================================================================
--- stable/12/sys/dev/netmap/if_ixl_netmap.h	Tue Dec 11 11:13:11 2018	(r341814)
+++
stable/12/sys/dev/netmap/if_ixl_netmap.h Tue Dec 11 11:31:13 2018 (r341815) @@ -129,7 +129,7 @@ ixl_netmap_attach(struct ixl_vsi *vsi) na.ifp = vsi->ifp; na.na_flags = NAF_BDG_MAYSLEEP; // XXX check that queues is set. - nm_prinf("queues is %p\n", vsi->queues); + nm_prinf("queues is %p", vsi->queues); if (vsi->queues) { na.num_tx_desc = vsi->queues[0].num_desc; na.num_rx_desc = vsi->queues[0].num_desc; Modified: stable/12/sys/dev/netmap/if_ptnet.c ============================================================================== --- stable/12/sys/dev/netmap/if_ptnet.c Tue Dec 11 11:13:11 2018 (r341814) +++ stable/12/sys/dev/netmap/if_ptnet.c Tue Dec 11 11:31:13 2018 (r341815) @@ -128,8 +128,8 @@ struct ptnet_queue { struct resource *irq; void *cookie; int kring_id; - struct ptnet_csb_gh *ptgh; - struct ptnet_csb_hg *pthg; + struct nm_csb_atok *atok; + struct nm_csb_ktoa *ktoa; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ @@ -166,8 +166,8 @@ struct ptnet_softc { unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; - struct ptnet_csb_gh *csb_gh; - struct ptnet_csb_hg *csb_hg; + struct nm_csb_atok *csb_gh; + struct nm_csb_ktoa *csb_hg; unsigned int min_tx_space; @@ -209,7 +209,7 @@ static void ptnet_tick(void *opaque); static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); -static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); +static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); @@ -327,7 +327,7 @@ ptnet_attach(device_t dev) sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; - if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) { + if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { device_printf(dev, "CSB cannot handle that many rings (%u)\n", sc->num_rings); err = ENOMEM; @@ -342,7 +342,7 @@ ptnet_attach(device_t dev) err = ENOMEM; goto err_path; } - sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE); + sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); { /* @@ -379,8 +379,8 @@ ptnet_attach(device_t dev) pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; - pq->ptgh = sc->csb_gh + i; - pq->pthg = sc->csb_hg + i; + pq->atok = sc->csb_gh + i; + pq->ktoa = sc->csb_hg + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); @@ -505,12 +505,25 @@ err_path: return err; } +/* Stop host sync-kloop if it was running. 
*/ +static void +ptnet_device_shutdown(struct ptnet_softc *sc) +{ + ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); + bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); + bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); + bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); + bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); +} + static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; + ptnet_device_shutdown(sc); + #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); @@ -543,10 +556,6 @@ ptnet_detach(device_t dev) ptnet_irqs_fini(sc); if (sc->csb_gh) { - bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); - bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); - bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); - bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); sc->csb_gh = NULL; sc->csb_hg = NULL; @@ -583,9 +592,8 @@ ptnet_detach(device_t dev) static int ptnet_suspend(device_t dev) { - struct ptnet_softc *sc; + struct ptnet_softc *sc = device_get_softc(dev); - sc = device_get_softc(dev); (void)sc; return (0); @@ -594,9 +602,8 @@ ptnet_suspend(device_t dev) static int ptnet_resume(device_t dev) { - struct ptnet_softc *sc; + struct ptnet_softc *sc = device_get_softc(dev); - sc = device_get_softc(dev); (void)sc; return (0); @@ -605,11 +612,11 @@ ptnet_resume(device_t dev) static int ptnet_shutdown(device_t dev) { - /* - * Suspend already does all of what we need to - * do here; we just never expect to be resumed. - */ - return (ptnet_suspend(dev)); + struct ptnet_softc *sc = device_get_softc(dev); + + ptnet_device_shutdown(sc); + + return (0); } static int @@ -796,7 +803,7 @@ ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); - pq->ptgh->guest_need_kick = 0; + pq->atok->appl_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { @@ -810,7 +817,7 @@ ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); - pq->ptgh->guest_need_kick = 1; + pq->atok->appl_need_kick = 1; PTNET_Q_UNLOCK(pq); } } @@ -881,7 +888,7 @@ ptnet_init_locked(struct ptnet_softc *sc) return ret; } - if (sc->ptna->backend_regifs == 0) { + if (sc->ptna->backend_users == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " @@ -962,7 +969,7 @@ ptnet_stop(struct ptnet_softc *sc) ptnet_nm_register(na_dr, 0 /* off */); - if (sc->ptna->backend_regifs == 0) { + if (sc->ptna->backend_users == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } @@ -1092,9 +1099,8 @@ ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) } static uint32_t -ptnet_nm_ptctl(if_t ifp, uint32_t cmd) +ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) { - struct ptnet_softc *sc = if_getsoftc(ifp); /* * Write a command and read back error status, * with zero meaning success. @@ -1130,8 +1136,8 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct net /* Sync krings from the host, reading from * CSB. 
*/ for (i = 0; i < sc->num_rings; i++) { - struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh; - struct ptnet_csb_hg *pthg = sc->queues[i].pthg; + struct nm_csb_atok *atok = sc->queues[i].atok; + struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; struct netmap_kring *kring; if (i < na->num_tx_rings) { @@ -1139,15 +1145,15 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct net } else { kring = na->rx_rings[i - na->num_tx_rings]; } - kring->rhead = kring->ring->head = ptgh->head; - kring->rcur = kring->ring->cur = ptgh->cur; - kring->nr_hwcur = pthg->hwcur; + kring->rhead = kring->ring->head = atok->head; + kring->rcur = kring->ring->cur = atok->cur; + kring->nr_hwcur = ktoa->hwcur; kring->nr_hwtail = kring->rtail = - kring->ring->tail = pthg->hwtail; + kring->ring->tail = ktoa->hwtail; ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, - pthg->hwcur, ptgh->head, ptgh->cur, - pthg->hwtail); + ktoa->hwcur, atok->head, atok->cur, + ktoa->hwtail); ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, @@ -1178,7 +1184,7 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff int i; if (!onoff) { - sc->ptna->backend_regifs--; + sc->ptna->backend_users--; } /* If this is the last netmap client, guest interrupt enable flags may @@ -1191,17 +1197,17 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff D("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; - pq->ptgh->guest_need_kick = 1; + pq->atok->appl_need_kick = 1; } } if (onoff) { - if (sc->ptna->backend_regifs == 0) { + if (sc->ptna->backend_users == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; - pq->pthg->host_need_kick = 1; - pq->ptgh->guest_need_kick = + pq->ktoa->kern_need_kick = 1; + pq->atok->appl_need_kick = (!(ifp->if_capenable & IFCAP_POLLING) && i >= sc->num_tx_rings); } @@ -1211,17 +1217,13 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ - ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE); + ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } - } - /* Sync from CSB must be done after REGIF PTCTL. Skip this - * step only if this is a netmap client and it is not the - * first one. */ - if ((!native && sc->ptna->backend_regifs == 0) || - (native && na->active_fds == 0)) { + /* Align the guest krings and rings to the state stored + * in the CSB. */ ptnet_sync_from_csb(sc, na); } @@ -1254,19 +1256,13 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff } } - /* Sync from CSB must be done before UNREGIF PTCTL, on the last - * netmap client. 
*/ - if (native && na->active_fds == 0) { - ptnet_sync_from_csb(sc, na); + if (sc->ptna->backend_users == 0) { + ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); } - - if (sc->ptna->backend_regifs == 0) { - ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE); - } } if (onoff) { - sc->ptna->backend_regifs++; + sc->ptna->backend_users++; } return ret; @@ -1279,7 +1275,7 @@ ptnet_nm_txsync(struct netmap_kring *kring, int flags) struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; - notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags); + notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } @@ -1294,7 +1290,7 @@ ptnet_nm_rxsync(struct netmap_kring *kring, int flags) struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; - notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags); + notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } @@ -1310,7 +1306,7 @@ ptnet_nm_intr(struct netmap_adapter *na, int onoff) for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; - pq->ptgh->guest_need_kick = onoff; + pq->atok->appl_need_kick = onoff; } } @@ -1676,25 +1672,13 @@ ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *h } /* End of offloading-related functions to be shared with vtnet. */ -static inline void -ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring) -{ - struct netmap_ring *ring = kring->ring; - - /* Update hwcur and hwtail as known by the host. */ - ptnetmap_guest_read_kring_csb(pthg, kring); - - /* nm_sync_finalize */ - ring->tail = kring->rtail = kring->nr_hwtail; -} - static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; - struct ptnet_csb_gh *ptgh = pq->ptgh; - struct ptnet_csb_hg *pthg = pq->pthg; + struct nm_csb_atok *atok = pq->atok; + struct nm_csb_ktoa *ktoa = pq->ktoa; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur @@ -1704,11 +1688,11 @@ ptnet_ring_update(struct ptnet_queue *pq, struct netma /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; - ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead); + ptnetmap_guest_write_kring_csb(atok, kring->rcur, kring->rhead); /* Kick the host if needed. */ - if (NM_ACCESS_ONCE(pthg->host_need_kick)) { - ptgh->sync_flags = sync_flags; + if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { + atok->sync_flags = sync_flags; ptnet_kick(pq); } } @@ -1728,8 +1712,8 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, uns struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; - struct ptnet_csb_gh *ptgh; - struct ptnet_csb_hg *pthg; + struct nm_csb_atok *atok; + struct nm_csb_ktoa *ktoa; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; @@ -1758,8 +1742,8 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, uns return ENETDOWN; } - ptgh = pq->ptgh; - pthg = pq->pthg; + atok = pq->atok; + ktoa = pq->ktoa; kring = na->tx_rings[pq->kring_id]; ring = kring->ring; lim = kring->nkr_num_slots - 1; @@ -1771,17 +1755,17 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, uns /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. 
*/ - ptnet_sync_tail(pthg, kring); + ptnet_sync_tail(ktoa, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ - ptgh->guest_need_kick = 1; + atok->appl_need_kick = 1; /* Double-check. */ - ptnet_sync_tail(pthg, kring); + ptnet_sync_tail(ktoa, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; @@ -1790,7 +1774,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, uns RD(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ - ptgh->guest_need_kick = 0; + atok->appl_need_kick = 0; } } @@ -2020,8 +2004,8 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budg { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; - struct ptnet_csb_gh *ptgh = pq->ptgh; - struct ptnet_csb_hg *pthg = pq->pthg; + struct nm_csb_atok *atok = pq->atok; + struct nm_csb_ktoa *ktoa = pq->ktoa; struct netmap_adapter *na = &sc->ptna->dr.up; struct netmap_kring *kring = na->rx_rings[pq->kring_id]; struct netmap_ring *ring = kring->ring; @@ -2053,21 +2037,21 @@ host_sync: /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ - ptnet_sync_tail(pthg, kring); + ptnet_sync_tail(ktoa, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ - ptgh->guest_need_kick = 1; + atok->appl_need_kick = 1; /* Double-check. */ - ptnet_sync_tail(pthg, kring); + ptnet_sync_tail(ktoa, kring); if (likely(head == ring->tail)) { break; } - ptgh->guest_need_kick = 0; + atok->appl_need_kick = 0; } } Modified: stable/12/sys/dev/netmap/if_vtnet_netmap.h ============================================================================== --- stable/12/sys/dev/netmap/if_vtnet_netmap.h Tue Dec 11 11:13:11 2018 (r341814) +++ stable/12/sys/dev/netmap/if_vtnet_netmap.h Tue Dec 11 11:31:13 2018 (r341815) @@ -79,7 +79,7 @@ vtnet_free_used(struct virtqueue *vq, int netmap_bufs, } if (deq) - nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)\n", + nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)", deq, nm_txrx2str(t), idx, netmap_bufs); } @@ -230,7 +230,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl /*writeable=*/0); if (unlikely(err)) { if (err != ENOSPC) - nm_prerr("virtqueue_enqueue(%s) failed: %d\n", + nm_prerr("virtqueue_enqueue(%s) failed: %d", kring->name, err); break; } @@ -251,7 +251,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl if (token == NULL) break; if (unlikely(token != (void *)txq)) - nm_prerr("BUG: TX token mismatch\n"); + nm_prerr("BUG: TX token mismatch"); else n++; } @@ -307,7 +307,7 @@ vtnet_netmap_kring_refill(struct netmap_kring *kring, /*readable=*/0, /*writeable=*/sg.sg_nseg); if (unlikely(err)) { if (err != ENOSPC) - nm_prerr("virtqueue_enqueue(%s) failed: %d\n", + nm_prerr("virtqueue_enqueue(%s) failed: %d", kring->name, err); break; } @@ -391,7 +391,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl break; } if (unlikely(token != (void *)rxq)) { - nm_prerr("BUG: RX token mismatch\n"); + nm_prerr("BUG: RX token mismatch"); } else { /* Skip the virtio-net header. 
*/ len -= sc->vtnet_hdr_size; @@ -533,7 +533,7 @@ vtnet_netmap_attach(struct vtnet_softc *sc) netmap_attach(&na); - nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d\n", + nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d", na.num_tx_rings, na.num_tx_desc, na.num_tx_rings, na.num_rx_desc); } Modified: stable/12/sys/dev/netmap/netmap.c ============================================================================== --- stable/12/sys/dev/netmap/netmap.c Tue Dec 11 11:13:11 2018 (r341814) +++ stable/12/sys/dev/netmap/netmap.c Tue Dec 11 11:31:13 2018 (r341815) @@ -480,6 +480,9 @@ ports attached to the switch) /* user-controlled variables */ int netmap_verbose; +#ifdef CONFIG_NETMAP_DEBUG +int netmap_debug; +#endif /* CONFIG_NETMAP_DEBUG */ static int netmap_no_timestamp; /* don't timestamp on rxsync */ int netmap_no_pendintr = 1; @@ -527,9 +530,6 @@ int netmap_generic_hwcsum = 0; /* Non-zero if ptnet devices are allowed to use virtio-net headers. */ int ptnet_vnet_hdr = 1; -/* 0 if ptnetmap should not use worker threads for TX processing */ -int ptnetmap_tx_workers = 1; - /* * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated * in some other operating systems @@ -540,6 +540,10 @@ SYSCTL_DECL(_dev_netmap); SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); +#ifdef CONFIG_NETMAP_DEBUG +SYSCTL_INT(_dev_netmap, OID_AUTO, debug, + CTLFLAG_RW, &netmap_debug, 0, "Debug messages"); +#endif /* CONFIG_NETMAP_DEBUG */ SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr, @@ -569,8 +573,6 @@ SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTL #endif SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr, 0, "Allow ptnet devices to use virtio-net headers"); -SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW, - &ptnetmap_tx_workers, 0, "Use worker threads for pnetmap TX processing"); SYSEND; @@ -692,7 +694,7 @@ nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, op = "Clamp"; } if (op && msg) - nm_prinf("%s %s to %d (was %d)\n", op, msg, *v, oldv); + nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv); return *v; } @@ -776,13 +778,14 @@ netmap_update_config(struct netmap_adapter *na) na->num_rx_rings = info.num_rx_rings; na->num_rx_desc = info.num_rx_descs; na->rx_buf_maxsize = info.rx_buf_maxsize; - D("configuration changed for %s: txring %d x %d, " - "rxring %d x %d, rxbufsz %d", - na->name, na->num_tx_rings, na->num_tx_desc, - na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); + if (netmap_verbose) + nm_prinf("configuration changed for %s: txring %d x %d, " + "rxring %d x %d, rxbufsz %d", + na->name, na->num_tx_rings, na->num_tx_desc, + na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); return 0; } - D("WARNING: configuration changed for %s while active: " + nm_prerr("WARNING: configuration changed for %s while active: " "txring %d x %d, rxring %d x %d, rxbufsz %d", na->name, info.num_tx_rings, info.num_tx_descs, info.num_rx_rings, info.num_rx_descs, @@ -828,7 +831,8 @@ netmap_krings_create(struct netmap_adapter *na, u_int enum txrx t; if (na->tx_rings != NULL) { - D("warning: krings were already created"); + if (netmap_debug & NM_DEBUG_ON) + nm_prerr("warning: krings were already created"); return 0; } @@ -842,7 +846,7 @@ netmap_krings_create(struct netmap_adapter *na, u_int na->tx_rings = 
nm_os_malloc((size_t)len); if (na->tx_rings == NULL) { - D("Cannot allocate krings"); + nm_prerr("Cannot allocate krings"); return ENOMEM; } na->rx_rings = na->tx_rings + n[NR_TX]; @@ -910,7 +914,8 @@ netmap_krings_delete(struct netmap_adapter *na) enum txrx t; if (na->tx_rings == NULL) { - D("warning: krings were already deleted"); + if (netmap_debug & NM_DEBUG_ON) + nm_prerr("warning: krings were already deleted"); return; } @@ -1012,11 +1017,11 @@ netmap_do_unregif(struct netmap_priv_d *priv) * happens if the close() occurs while a concurrent * syscall is running. */ - if (netmap_verbose) - D("deleting last instance for %s", na->name); + if (netmap_debug & NM_DEBUG_ON) + nm_prinf("deleting last instance for %s", na->name); if (nm_netmap_on(na)) { - D("BUG: netmap on while going to delete the krings"); + nm_prerr("BUG: netmap on while going to delete the krings"); } na->nm_krings_delete(na); @@ -1033,14 +1038,6 @@ netmap_do_unregif(struct netmap_priv_d *priv) priv->np_nifp = NULL; } -/* call with NMG_LOCK held */ -static __inline int -nm_si_user(struct netmap_priv_d *priv, enum txrx t) -{ - return (priv->np_na != NULL && - (priv->np_qlast[t] - priv->np_qfirst[t] > 1)); -} - struct netmap_priv_d* netmap_priv_new(void) { @@ -1136,8 +1133,8 @@ netmap_send_up(struct ifnet *dst, struct mbq *q) /* Send packets up, outside the lock; head/prev machinery * is only useful for Windows. */ while ((m = mbq_dequeue(q)) != NULL) { - if (netmap_verbose & NM_VERB_HOST) - D("sending up pkt %p size %d", m, MBUF_LEN(m)); + if (netmap_debug & NM_DEBUG_HOST) + nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m)); prev = nm_os_send_up(dst, m, prev); if (head == NULL) head = prev; @@ -1332,8 +1329,8 @@ netmap_rxsync_from_host(struct netmap_kring *kring, in m_copydata(m, 0, len, NMB(na, slot)); ND("nm %d len %d", nm_i, len); - if (netmap_verbose) - D("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); + if (netmap_debug & NM_DEBUG_HOST) + nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); slot->len = len; slot->flags = 0; @@ -1500,7 +1497,7 @@ netmap_get_na(struct nmreq_header *hdr, if (req->nr_mode == NR_REG_PIPE_MASTER || req->nr_mode == NR_REG_PIPE_SLAVE) { /* Do not accept deprecated pipe modes. */ - D("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax"); + nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax"); return EINVAL; } @@ -1527,9 +1524,7 @@ netmap_get_na(struct nmreq_header *hdr, * 0 !NULL type matches and na created/found * !0 !NULL impossible */ - - /* try to see if this is a ptnetmap port */ - error = netmap_get_pt_host_na(hdr, na, nmd, create); + error = netmap_get_null_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; @@ -1739,7 +1734,7 @@ nm_rxsync_prologue(struct netmap_kring *kring, struct /* * Error routine called when txsync/rxsync detects an error. - * Can't do much more than resetting head =cur = hwcur, tail = hwtail + * Can't do much more than resetting head = cur = hwcur, tail = hwtail * Return 1 on reinit. * * This routine is only called by the upper half of the kernel. 
@@ -1810,12 +1805,6 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 enum txrx t; u_int j; - if ((nr_flags & NR_PTNETMAP_HOST) && ((nr_mode != NR_REG_ALL_NIC) || - nr_flags & (NR_RX_RINGS_ONLY|NR_TX_RINGS_ONLY))) { - D("Error: only NR_REG_ALL_NIC supported with netmap passthrough"); - return EINVAL; - } - for_rx_tx(t) { if (nr_flags & excluded_direction[t]) { priv->np_qfirst[t] = priv->np_qlast[t] = 0; @@ -1823,6 +1812,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 } switch (nr_mode) { case NR_REG_ALL_NIC: + case NR_REG_NULL: priv->np_qfirst[t] = 0; priv->np_qlast[t] = nma_get_nrings(na, t); ND("ALL/PIPE: %s %d %d", nm_txrx2str(t), @@ -1831,7 +1821,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 case NR_REG_SW: case NR_REG_NIC_SW: if (!(na->na_flags & NAF_HOST_RINGS)) { - D("host rings not supported"); + nm_prerr("host rings not supported"); return EINVAL; } priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? @@ -1844,7 +1834,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 case NR_REG_ONE_NIC: if (nr_ringid >= na->num_tx_rings && nr_ringid >= na->num_rx_rings) { - D("invalid ring id %d", nr_ringid); + nm_prerr("invalid ring id %d", nr_ringid); return EINVAL; } /* if not enough rings, use the first one */ @@ -1857,11 +1847,11 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 priv->np_qfirst[t], priv->np_qlast[t]); break; default: - D("invalid regif type %d", nr_mode); + nm_prerr("invalid regif type %d", nr_mode); return EINVAL; } } - priv->np_flags = nr_flags | nr_mode; // TODO + priv->np_flags = nr_flags; /* Allow transparent forwarding mode in the host --> nic * direction only if all the TX hw rings have been opened. */ @@ -1871,7 +1861,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint3 } if (netmap_verbose) { - D("%s: tx [%d,%d) rx [%d,%d) id %d", + nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], @@ -1927,6 +1917,7 @@ netmap_unset_ringid(struct netmap_priv_d *priv) } priv->np_flags = 0; priv->np_txpoll = 0; + priv->np_kloop_state = 0; } @@ -1943,8 +1934,8 @@ netmap_krings_get(struct netmap_priv_d *priv) int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; - if (netmap_verbose) - D("%s: grabbing tx [%d, %d) rx [%d, %d)", + if (netmap_debug & NM_DEBUG_ON) + nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], @@ -2021,6 +2012,110 @@ nm_priv_rx_enabled(struct netmap_priv_d *priv) return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]); } +/* Validate the CSB entries for both directions (atok and ktoa). + * To be called under NMG_LOCK(). 
*/ +static int +netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo) +{ + struct nm_csb_atok *csb_atok_base = + (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok; + struct nm_csb_ktoa *csb_ktoa_base = + (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa; + enum txrx t; + int num_rings[NR_TXRX], tot_rings; + size_t entry_size[2]; + void *csb_start[2]; + int i; + + if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { + nm_prerr("Cannot update CSB while kloop is running"); + return EBUSY; + } + + tot_rings = 0; + for_rx_tx(t) { + num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t]; + tot_rings += num_rings[t]; + } + if (tot_rings <= 0) + return 0; + + if (!(priv->np_flags & NR_EXCLUSIVE)) { + nm_prerr("CSB mode requires NR_EXCLUSIVE"); + return EINVAL; + } + + entry_size[0] = sizeof(*csb_atok_base); + entry_size[1] = sizeof(*csb_ktoa_base); + csb_start[0] = (void *)csb_atok_base; + csb_start[1] = (void *)csb_ktoa_base; + + for (i = 0; i < 2; i++) { + /* On Linux we could use access_ok() to simplify + * the validation. However, the advantage of + * this approach is that it works also on + * FreeBSD. */ + size_t csb_size = tot_rings * entry_size[i]; + void *tmp; + int err; + + if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) { + nm_prerr("Unaligned CSB address"); + return EINVAL; + } + + tmp = nm_os_malloc(csb_size); + if (!tmp) + return ENOMEM; + if (i == 0) { + /* Application --> kernel direction. */ + err = copyin(csb_start[i], tmp, csb_size); + } else { + /* Kernel --> application direction. */ + memset(tmp, 0, csb_size); + err = copyout(tmp, csb_start[i], csb_size); + } + nm_os_free(tmp); + if (err) { + nm_prerr("Invalid CSB address"); + return err; + } + } + + priv->np_csb_atok_base = csb_atok_base; + priv->np_csb_ktoa_base = csb_ktoa_base; + + /* Initialize the CSB. */ + for_rx_tx(t) { + for (i = 0; i < num_rings[t]; i++) { + struct netmap_kring *kring = + NMR(priv->np_na, t)[i + priv->np_qfirst[t]]; + struct nm_csb_atok *csb_atok = csb_atok_base + i; + struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i; + + if (t == NR_RX) { + csb_atok += num_rings[NR_TX]; + csb_ktoa += num_rings[NR_TX]; + } + + CSB_WRITE(csb_atok, head, kring->rhead); + CSB_WRITE(csb_atok, cur, kring->rcur); + CSB_WRITE(csb_atok, appl_need_kick, 1); + CSB_WRITE(csb_atok, sync_flags, 1); + CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur); + CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail); + CSB_WRITE(csb_ktoa, kern_need_kick, 1); + + nm_prinf("csb_init for kring %s: head %u, cur %u, " + "hwcur %u, hwtail %u", kring->name, + kring->rhead, kring->rcur, kring->nr_hwcur, + kring->nr_hwtail); + } + } + + return 0; +} + /* * possibly move the interface to netmap-mode. * If success it returns a pointer to netmap_if, otherwise NULL. @@ -2137,7 +2232,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct net na->name, mtu, na->rx_buf_maxsize, nbs); if (na->rx_buf_maxsize == 0) { - D("%s: error: rx_buf_maxsize == 0", na->name); + nm_prerr("%s: error: rx_buf_maxsize == 0", na->name); error = EIO; goto err_drop_mem; } @@ -2149,7 +2244,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct net * cannot be used in this case. 
*/ if (nbs < mtu) { nm_prerr("error: netmap buf size (%u) " - "< device MTU (%u)\n", nbs, mtu); + "< device MTU (%u)", nbs, mtu); error = EINVAL; goto err_drop_mem; } @@ -2162,14 +2257,14 @@ netmap_do_regif(struct netmap_priv_d *priv, struct net if (!(na->na_flags & NAF_MOREFRAG)) { nm_prerr("error: large MTU (%d) needed " "but %s does not support " - "NS_MOREFRAG\n", mtu, + "NS_MOREFRAG", mtu, na->ifp->if_xname); error = EINVAL; goto err_drop_mem; } else if (nbs < na->rx_buf_maxsize) { nm_prerr("error: using NS_MOREFRAG on " "%s requires netmap buf size " - ">= %u\n", na->ifp->if_xname, + ">= %u", na->ifp->if_xname, na->rx_buf_maxsize); error = EINVAL; goto err_drop_mem; @@ -2177,7 +2272,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct net nm_prinf("info: netmap application on " "%s needs to support " "NS_MOREFRAG " - "(MTU=%u,netmap_buf_size=%u)\n", + "(MTU=%u,netmap_buf_size=%u)", na->ifp->if_xname, mtu, nbs); } } @@ -2307,7 +2402,6 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c struct ifnet *ifp = NULL; int error = 0; u_int i, qfirst, qlast; - struct netmap_if *nifp; struct netmap_kring **krings; int sync_flags; enum txrx t; @@ -2316,14 +2410,10 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c case NIOCCTRL: { struct nmreq_header *hdr = (struct nmreq_header *)data; - if (hdr->nr_version != NETMAP_API) { - D("API mismatch for reqtype %d: got %d need %d", - hdr->nr_version, - hdr->nr_version, NETMAP_API); - hdr->nr_version = NETMAP_API; - } if (hdr->nr_version < NETMAP_MIN_API || hdr->nr_version > NETMAP_MAX_API) { + nm_prerr("API mismatch: got %d need %d", + hdr->nr_version, NETMAP_API); return EINVAL; } @@ -2345,13 +2435,13 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c case NETMAP_REQ_REGISTER: { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; + struct netmap_if *nifp; + /* Protect access to priv from concurrent requests. */ NMG_LOCK(); do { - u_int memflags; -#ifdef WITH_EXTMEM struct nmreq_option *opt; -#endif /* WITH_EXTMEM */ + u_int memflags; *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
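
For context on the sync-kloop mechanism described in the log message above, the
sketch below shows how a userspace application might drive the in-kernel
TXSYNC/RXSYNC event-loop. This is an illustrative sketch, not part of the commit:
it assumes the NETMAP_REQ_SYNC_KLOOP_START / NETMAP_REQ_SYNC_KLOOP_STOP request
types and the nmreq_header layout exported by net/netmap.h, that the file
descriptor has already been bound to a port with NETMAP_REQ_REGISTER, and that
any required CSB/eventfd options have been set up; error handling is omitted.

/*
 * Illustrative sketch (not part of this commit): driving the in-kernel
 * sync loop from a dedicated application thread.
 */
#include <sys/ioctl.h>
#include <net/netmap.h>
#include <pthread.h>
#include <string.h>

static void *
kloop_thread(void *arg)
{
	int fd = *(int *)arg;
	struct nmreq_header hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_SYNC_KLOOP_START;
	/* The ioctl runs the kernel event-loop and returns only when
	 * another thread issues NETMAP_REQ_SYNC_KLOOP_STOP or the
	 * netmap file descriptor is closed. */
	ioctl(fd, NIOCCTRL, &hdr);
	return NULL;
}

static void
kloop_stop(int fd)
{
	struct nmreq_header hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_SYNC_KLOOP_STOP;
	/* Ask the kernel loop started above to terminate. */
	ioctl(fd, NIOCCTRL, &hdr);
}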