Date: Wed, 9 Jan 2019 01:11:19 +0000 (UTC)
From: Gleb Smirnoff <glebius@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r342872 - in head/sys: compat/linuxkpi/common/include/linux dev/wtap net net/altq netinet netinet/netdump netinet6 netpfil/pf ofed/drivers/infiniband/core ofed/drivers/infiniband/ulp/ipoib
Message-ID: <201901090111.x091BJrW055532@repo.freebsd.org>
Author: glebius
Date: Wed Jan 9 01:11:19 2019
New Revision: 342872
URL: https://svnweb.freebsd.org/changeset/base/342872

Log:
  Mechanical cleanup of epoch(9) usage in the network stack.

  - Remove macros that covertly create an epoch_tracker on the thread
    stack. Such macros are quite unsafe, e.g. they will produce buggy code
    if the same macro is used in nested scopes. Always declare the
    epoch_tracker explicitly.

  - Unmask the interface list IFNET_RLOCK_NOSLEEP(), interface address list
    IF_ADDR_RLOCK() and interface AF-specific data IF_AFDATA_RLOCK() read
    locking macros to what they actually are - the net_epoch. Keeping them
    as-is is very misleading: they are all named FOO_RLOCK(), yet they no
    longer have lock semantics. They now allow recursion and, more
    importantly, no longer guarantee protection against their companion
    WLOCK macros. Note: INP_HASH_RLOCK() has the same problems, but is not
    touched by this commit.

  This is a non-functional, mechanical change. The only functionally
  changed functions are ni6_addrs() and ni6_store_addrs(), where we no
  longer enter the epoch recursively.

  Discussed with:	jtl, gallatin

[Illustrative sketches of the old and new epoch idioms follow the truncated
diff at the end of this message.]

Modified:
  head/sys/compat/linuxkpi/common/include/linux/inetdevice.h
  head/sys/dev/wtap/if_wtap.c
  head/sys/net/altq/altq_subr.c
  head/sys/net/bridgestp.c
  head/sys/net/if.c
  head/sys/net/if_llatbl.c
  head/sys/net/if_var.h
  head/sys/net/if_vlan.c
  head/sys/net/route.c
  head/sys/net/rtsock.c
  head/sys/netinet/if_ether.c
  head/sys/netinet/igmp.c
  head/sys/netinet/in.c
  head/sys/netinet/in_mcast.c
  head/sys/netinet/in_pcb.c
  head/sys/netinet/in_pcb.h
  head/sys/netinet/ip_carp.c
  head/sys/netinet/ip_divert.c
  head/sys/netinet/ip_icmp.c
  head/sys/netinet/ip_input.c
  head/sys/netinet/ip_mroute.c
  head/sys/netinet/ip_options.c
  head/sys/netinet/ip_output.c
  head/sys/netinet/netdump/netdump_client.c
  head/sys/netinet/sctp_bsd_addr.c
  head/sys/netinet6/icmp6.c
  head/sys/netinet6/in6.c
  head/sys/netinet6/in6_ifattach.c
  head/sys/netinet6/in6_mcast.c
  head/sys/netinet6/in6_pcb.c
  head/sys/netinet6/in6_var.h
  head/sys/netinet6/mld6.c
  head/sys/netinet6/nd6.c
  head/sys/netinet6/nd6_nbr.c
  head/sys/netinet6/nd6_rtr.c
  head/sys/netinet6/raw_ip6.c
  head/sys/netinet6/scope6.c
  head/sys/netpfil/pf/pf_if.c
  head/sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
  head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c

Modified: head/sys/compat/linuxkpi/common/include/linux/inetdevice.h ============================================================================== --- head/sys/compat/linuxkpi/common/include/linux/inetdevice.h Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/compat/linuxkpi/common/include/linux/inetdevice.h Wed Jan 9 01:11:19 2019 (r342872) @@ -37,6 +37,7 @@ static inline struct net_device * ip_dev_find(struct vnet *vnet, uint32_t addr) { struct sockaddr_in sin; + struct epoch_tracker et; struct ifaddr *ifa; struct ifnet *ifp; @@ -44,7 +45,7 @@ ip_dev_find(struct vnet *vnet, uint32_t addr) sin.sin_addr.s_addr = addr; sin.sin_len = sizeof(sin); sin.sin_family = AF_INET; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); CURVNET_SET_QUIET(vnet); ifa = ifa_ifwithaddr((struct sockaddr *)&sin); CURVNET_RESTORE(); @@ -54,7 +55,7 @@ ip_dev_find(struct vnet *vnet, uint32_t addr) } else { ifp = NULL; } - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); return (ifp); } @@ -62,6 +63,7 @@ static inline struct net_device * ip6_dev_find(struct vnet *vnet, struct in6_addr addr, uint16_t scope_id) { struct sockaddr_in6 sin6; + struct epoch_tracker et; struct ifaddr *ifa; struct ifnet *ifp; @@ -74,7 +76,7 @@ ip6_dev_find(struct vnet *vnet, struct in6_addr addr, /* embed the IPv6 scope
ID */ sin6.sin6_addr.s6_addr16[1] = htons(scope_id); } - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); CURVNET_SET_QUIET(vnet); ifa = ifa_ifwithaddr((struct sockaddr *)&sin6); CURVNET_RESTORE(); @@ -84,7 +86,7 @@ ip6_dev_find(struct vnet *vnet, struct in6_addr addr, } else { ifp = NULL; } - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); return (ifp); } Modified: head/sys/dev/wtap/if_wtap.c ============================================================================== --- head/sys/dev/wtap/if_wtap.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/dev/wtap/if_wtap.c Wed Jan 9 01:11:19 2019 (r342872) @@ -91,6 +91,7 @@ wtap_node_write(struct cdev *dev, struct uio *uio, int struct ifnet *ifp; struct wtap_softc *sc; uint8_t buf[1024]; + struct epoch_tracker et; int buf_len; uprintf("write device %s \"echo.\"\n", devtoname(dev)); @@ -106,7 +107,7 @@ wtap_node_write(struct cdev *dev, struct uio *uio, int m_copyback(m, 0, buf_len, buf); CURVNET_SET(TD_TO_VNET(curthread)); - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { printf("ifp->if_xname = %s\n", ifp->if_xname); @@ -119,7 +120,7 @@ wtap_node_write(struct cdev *dev, struct uio *uio, int } } - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); CURVNET_RESTORE(); return(err); Modified: head/sys/net/altq/altq_subr.c ============================================================================== --- head/sys/net/altq/altq_subr.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/altq/altq_subr.c Wed Jan 9 01:11:19 2019 (r342872) @@ -410,11 +410,11 @@ tbr_timeout(arg) { VNET_ITERATOR_DECL(vnet_iter); struct ifnet *ifp; - int active, s; + struct epoch_tracker et; + int active; active = 0; - s = splnet(); - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); @@ -431,8 +431,7 @@ tbr_timeout(arg) CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); - IFNET_RUNLOCK_NOSLEEP(); - splx(s); + NET_EPOCH_EXIT(et); if (active > 0) CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0); else Modified: head/sys/net/bridgestp.c ============================================================================== --- head/sys/net/bridgestp.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/bridgestp.c Wed Jan 9 01:11:19 2019 (r342872) @@ -2022,6 +2022,7 @@ bstp_same_bridgeid(uint64_t id1, uint64_t id2) void bstp_reinit(struct bstp_state *bs) { + struct epoch_tracker et; struct bstp_port *bp; struct ifnet *ifp, *mif; u_char *e_addr; @@ -2042,7 +2043,7 @@ bstp_reinit(struct bstp_state *bs) * from is part of this bridge, so we can have more than one independent * bridges in the same STP domain. 
*/ - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (ifp->if_type != IFT_ETHER) continue; /* Not Ethernet */ @@ -2062,7 +2063,7 @@ bstp_reinit(struct bstp_state *bs) continue; } } - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); if (mif == NULL) goto disablestp; Modified: head/sys/net/if.c ============================================================================== --- head/sys/net/if.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/if.c Wed Jan 9 01:11:19 2019 (r342872) @@ -351,16 +351,17 @@ ifnet_byindex(u_short idx) struct ifnet * ifnet_byindex_ref(u_short idx) { + struct epoch_tracker et; struct ifnet *ifp; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); ifp = ifnet_byindex_locked(idx); if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); return (NULL); } if_ref(ifp); - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); return (ifp); } @@ -424,14 +425,15 @@ ifnet_setbyindex(u_short idx, struct ifnet *ifp) struct ifaddr * ifaddr_byindex(u_short idx) { + struct epoch_tracker et; struct ifnet *ifp; struct ifaddr *ifa = NULL; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); ifp = ifnet_byindex_locked(idx); if (ifp != NULL && (ifa = ifp->if_addr) != NULL) ifa_ref(ifa); - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); return (ifa); } @@ -967,12 +969,14 @@ if_purgeaddrs(struct ifnet *ifp) struct ifaddr *ifa; while (1) { - NET_EPOCH_ENTER(); + struct epoch_tracker et; + + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_LINK) break; } - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); if (ifa == NULL) break; @@ -1609,38 +1613,39 @@ ifgr_groups_get(void *ifgrp) static int if_getgroup(struct ifgroupreq *ifgr, struct ifnet *ifp) { + struct epoch_tracker et; int len, error; struct ifg_list *ifgl; struct ifg_req ifgrq, *ifgp; if (ifgr->ifgr_len == 0) { - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) ifgr->ifgr_len += sizeof(struct ifg_req); - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); return (0); } len = ifgr->ifgr_len; ifgp = ifgr_groups_get(ifgr); /* XXX: wire */ - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { if (len < sizeof(ifgrq)) { - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, sizeof(ifgrq.ifgrq_group)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); return (error); } len -= sizeof(ifgrq); ifgp++; } - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); return (0); } @@ -1954,11 +1959,12 @@ done: int ifa_ifwithaddr_check(const struct sockaddr *addr) { + struct epoch_tracker et; int rc; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); rc = (ifa_ifwithaddr(addr) != NULL); - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); return (rc); } @@ -2187,6 +2193,7 @@ ifa_preferred(struct ifaddr *cur, struct ifaddr *next) static void link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) { + struct epoch_tracker et; struct ifaddr *ifa, *oifa; struct sockaddr *dst; struct ifnet *ifp; @@ -2194,7 +2201,7 @@ link_rtrequest(int cmd, struct rtentry *rt, struct rt_ if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == NULL) || ((ifp = ifa->ifa_ifp) == NULL) || ((dst = rt_key(rt)) == NULL)) return; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); ifa = ifaof_ifpforaddr(dst, ifp); if (ifa) { oifa = rt->rt_ifa; @@ -2206,7 +2213,7 
@@ link_rtrequest(int cmd, struct rtentry *rt, struct rt_ if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) ifa->ifa_rtrequest(cmd, rt, info); } - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); } struct sockaddr_dl * @@ -2407,9 +2414,10 @@ if_qflush(struct ifnet *ifp) struct ifnet * ifunit_ref(const char *name) { + struct epoch_tracker et; struct ifnet *ifp; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && !(ifp->if_flags & IFF_DYING)) @@ -2417,21 +2425,22 @@ ifunit_ref(const char *name) } if (ifp != NULL) if_ref(ifp); - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); return (ifp); } struct ifnet * ifunit(const char *name) { + struct epoch_tracker et; struct ifnet *ifp; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) break; } - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); return (ifp); } @@ -2819,6 +2828,7 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, return (EINVAL); if (cmd == SIOCADDMULTI) { + struct epoch_tracker et; struct ifmultiaddr *ifma; /* @@ -2828,9 +2838,9 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, * lose a race while we check if the membership * already exists. */ - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); ifma = if_findmulti(ifp, &ifr->ifr_addr); - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); if (ifma != NULL) error = EADDRINUSE; else @@ -3253,6 +3263,7 @@ again: IFNET_RLOCK(); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { + struct epoch_tracker et; int addrs; /* @@ -3269,7 +3280,7 @@ again: } addrs = 0; - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa = ifa->ifa_addr; @@ -3297,7 +3308,7 @@ again: if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); if (addrs == 0) { sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); @@ -3604,15 +3615,16 @@ if_delmulti(struct ifnet *ifp, struct sockaddr *sa) struct ifmultiaddr *ifma; int lastref; #ifdef INVARIANTS + struct epoch_tracker et; struct ifnet *oifp; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); #endif @@ -3678,15 +3690,16 @@ if_delmulti_ifma_flags(struct ifmultiaddr *ifma, int f if (ifp == NULL) { printf("%s: ifma_ifp seems to be detached\n", __func__); } else { + struct epoch_tracker et; struct ifnet *oifp; - IFNET_RLOCK_NOSLEEP(); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; - IFNET_RUNLOCK_NOSLEEP(); + NET_EPOCH_EXIT(et); } #endif /* @@ -3810,10 +3823,11 @@ if_setlladdr(struct ifnet *ifp, const u_char *lladdr, struct sockaddr_dl *sdl; struct ifaddr *ifa; struct ifreq ifr; + struct epoch_tracker et; int rc; rc = 0; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); ifa = ifp->if_addr; if (ifa == NULL) { rc = EINVAL; @@ -3847,7 +3861,7 @@ if_setlladdr(struct ifnet *ifp, const u_char *lladdr, * to re-init it in order to reprogram its * address filter. 
*/ - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); if ((ifp->if_flags & IFF_UP) != 0) { if (ifp->if_ioctl) { ifp->if_flags &= ~IFF_UP; @@ -3863,7 +3877,7 @@ if_setlladdr(struct ifnet *ifp, const u_char *lladdr, EVENTHANDLER_INVOKE(iflladdr_event, ifp); return (0); out: - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); return (rc); } Modified: head/sys/net/if_llatbl.c ============================================================================== --- head/sys/net/if_llatbl.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/if_llatbl.c Wed Jan 9 01:11:19 2019 (r342872) @@ -90,6 +90,7 @@ static int htable_foreach_lle(struct lltable *llt, llt static int lltable_dump_af(struct lltable *llt, struct sysctl_req *wr) { + struct epoch_tracker et; int error; LLTABLE_LIST_LOCK_ASSERT(); @@ -98,10 +99,10 @@ lltable_dump_af(struct lltable *llt, struct sysctl_req return (0); error = 0; - IF_AFDATA_RLOCK(llt->llt_ifp); + NET_EPOCH_ENTER(et); error = lltable_foreach_lle(llt, (llt_foreach_cb_t *)llt->llt_dump_entry, wr); - IF_AFDATA_RUNLOCK(llt->llt_ifp); + NET_EPOCH_EXIT(et); return (error); } @@ -453,11 +454,12 @@ struct llentry * llentry_alloc(struct ifnet *ifp, struct lltable *lt, struct sockaddr_storage *dst) { + struct epoch_tracker et; struct llentry *la, *la_tmp; - IF_AFDATA_RLOCK(ifp); + NET_EPOCH_ENTER(et); la = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst); - IF_AFDATA_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); if (la != NULL) { LLE_ADDREF(la); Modified: head/sys/net/if_var.h ============================================================================== --- head/sys/net/if_var.h Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/if_var.h Wed Jan 9 01:11:19 2019 (r342872) @@ -398,20 +398,16 @@ struct ifnet { */ #define IF_ADDR_LOCK_INIT(if) mtx_init(&(if)->if_addr_lock, "if_addr_lock", NULL, MTX_DEF) #define IF_ADDR_LOCK_DESTROY(if) mtx_destroy(&(if)->if_addr_lock) -#define IF_ADDR_RLOCK(if) struct epoch_tracker if_addr_et; epoch_enter_preempt(net_epoch_preempt, &if_addr_et); -#define IF_ADDR_RUNLOCK(if) epoch_exit_preempt(net_epoch_preempt, &if_addr_et); #define IF_ADDR_WLOCK(if) mtx_lock(&(if)->if_addr_lock) #define IF_ADDR_WUNLOCK(if) mtx_unlock(&(if)->if_addr_lock) #define IF_ADDR_LOCK_ASSERT(if) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(if)->if_addr_lock)) #define IF_ADDR_WLOCK_ASSERT(if) mtx_assert(&(if)->if_addr_lock, MA_OWNED) -#define NET_EPOCH_ENTER() struct epoch_tracker nep_et; epoch_enter_preempt(net_epoch_preempt, &nep_et) -#define NET_EPOCH_ENTER_ET(et) epoch_enter_preempt(net_epoch_preempt, &(et)) -#define NET_EPOCH_EXIT() epoch_exit_preempt(net_epoch_preempt, &nep_et) -#define NET_EPOCH_EXIT_ET(et) epoch_exit_preempt(net_epoch_preempt, &(et)) -#define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt) +#define NET_EPOCH_ENTER(et) epoch_enter_preempt(net_epoch_preempt, &(et)) +#define NET_EPOCH_EXIT(et) epoch_exit_preempt(net_epoch_preempt, &(et)) +#define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt) +#define NET_EPOCH_ASSERT() MPASS(in_epoch(net_epoch_preempt)) - /* * Function variations on locking macros intended to be used by loadable * kernel modules in order to divorce them from the internals of address list @@ -490,16 +486,13 @@ EVENTHANDLER_DECLARE(group_change_event, group_change_ mtx_init(&(ifp)->if_afdata_lock, "if_afdata", NULL, MTX_DEF) #define IF_AFDATA_WLOCK(ifp) mtx_lock(&(ifp)->if_afdata_lock) -#define IF_AFDATA_RLOCK(ifp) struct epoch_tracker if_afdata_et; epoch_enter_preempt(net_epoch_preempt, &if_afdata_et) #define IF_AFDATA_WUNLOCK(ifp) 
mtx_unlock(&(ifp)->if_afdata_lock) -#define IF_AFDATA_RUNLOCK(ifp) epoch_exit_preempt(net_epoch_preempt, &if_afdata_et) #define IF_AFDATA_LOCK(ifp) IF_AFDATA_WLOCK(ifp) #define IF_AFDATA_UNLOCK(ifp) IF_AFDATA_WUNLOCK(ifp) #define IF_AFDATA_TRYLOCK(ifp) mtx_trylock(&(ifp)->if_afdata_lock) #define IF_AFDATA_DESTROY(ifp) mtx_destroy(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK_ASSERT(ifp) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ifp)->if_afdata_lock)) -#define IF_AFDATA_RLOCK_ASSERT(ifp) MPASS(in_epoch(net_epoch_preempt)); #define IF_AFDATA_WLOCK_ASSERT(ifp) mtx_assert(&(ifp)->if_afdata_lock, MA_OWNED) #define IF_AFDATA_UNLOCK_ASSERT(ifp) mtx_assert(&(ifp)->if_afdata_lock, MA_NOTOWNED) @@ -583,16 +576,13 @@ extern struct sx ifnet_sxlock; * write, but also whether it was acquired with sleep support or not. */ #define IFNET_RLOCK_ASSERT() sx_assert(&ifnet_sxlock, SA_SLOCKED) -#define IFNET_RLOCK_NOSLEEP_ASSERT() MPASS(in_epoch(net_epoch_preempt)) #define IFNET_WLOCK_ASSERT() do { \ sx_assert(&ifnet_sxlock, SA_XLOCKED); \ rw_assert(&ifnet_rwlock, RA_WLOCKED); \ } while (0) #define IFNET_RLOCK() sx_slock(&ifnet_sxlock) -#define IFNET_RLOCK_NOSLEEP() struct epoch_tracker ifnet_rlock_et; epoch_enter_preempt(net_epoch_preempt, &ifnet_rlock_et) #define IFNET_RUNLOCK() sx_sunlock(&ifnet_sxlock) -#define IFNET_RUNLOCK_NOSLEEP() epoch_exit_preempt(net_epoch_preempt, &ifnet_rlock_et) /* * Look up an ifnet given its index; the _ref variant also acquires a Modified: head/sys/net/if_vlan.c ============================================================================== --- head/sys/net/if_vlan.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/if_vlan.c Wed Jan 9 01:11:19 2019 (r342872) @@ -233,10 +233,6 @@ static struct sx _VLAN_SX_ID; #define VLAN_LOCKING_DESTROY() \ sx_destroy(&_VLAN_SX_ID) -#define VLAN_RLOCK() NET_EPOCH_ENTER(); -#define VLAN_RUNLOCK() NET_EPOCH_EXIT(); -#define VLAN_RLOCK_ASSERT() MPASS(in_epoch(net_epoch_preempt)) - #define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID) #define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID) #define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID) @@ -252,11 +248,8 @@ static struct sx _VLAN_SX_ID; */ #define TRUNK_LOCK_INIT(trunk) mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF) #define TRUNK_LOCK_DESTROY(trunk) mtx_destroy(&(trunk)->lock) -#define TRUNK_RLOCK(trunk) NET_EPOCH_ENTER() #define TRUNK_WLOCK(trunk) mtx_lock(&(trunk)->lock) -#define TRUNK_RUNLOCK(trunk) NET_EPOCH_EXIT(); #define TRUNK_WUNLOCK(trunk) mtx_unlock(&(trunk)->lock) -#define TRUNK_RLOCK_ASSERT(trunk) MPASS(in_epoch(net_epoch_preempt)) #define TRUNK_LOCK_ASSERT(trunk) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(trunk)->lock)) #define TRUNK_WLOCK_ASSERT(trunk) mtx_assert(&(trunk)->lock, MA_OWNED); @@ -472,7 +465,7 @@ vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) { struct ifvlan *ifv; - TRUNK_RLOCK_ASSERT(trunk); + NET_EPOCH_ASSERT(); CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) if (ifv->ifv_vid == vid) @@ -617,16 +610,17 @@ vlan_setmulti(struct ifnet *ifp) static void vlan_iflladdr(void *arg __unused, struct ifnet *ifp) { + struct epoch_tracker et; struct ifvlan *ifv; struct ifnet *ifv_ifp; struct ifvlantrunk *trunk; struct sockaddr_dl *sdl; /* Need the rmlock since this is run on taskqueue_swi. 
*/ - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); trunk = ifp->if_vlantrunk; if (trunk == NULL) { - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return; } @@ -652,7 +646,7 @@ vlan_iflladdr(void *arg __unused, struct ifnet *ifp) taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); } TRUNK_WUNLOCK(trunk); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); } /* @@ -698,17 +692,18 @@ vlan_ifdetach(void *arg __unused, struct ifnet *ifp) static struct ifnet * vlan_trunkdev(struct ifnet *ifp) { + struct epoch_tracker et; struct ifvlan *ifv; if (ifp->if_type != IFT_L2VLAN) return (NULL); - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); ifv = ifp->if_softc; ifp = NULL; if (ifv->ifv_trunk) ifp = PARENT(ifv); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return (ifp); } @@ -780,20 +775,21 @@ vlan_setcookie(struct ifnet *ifp, void *cookie) static struct ifnet * vlan_devat(struct ifnet *ifp, uint16_t vid) { + struct epoch_tracker et; struct ifvlantrunk *trunk; struct ifvlan *ifv; - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); trunk = ifp->if_vlantrunk; if (trunk == NULL) { - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return (NULL); } ifp = NULL; ifv = vlan_gethash(trunk, vid); if (ifv) ifp = ifv->ifv_ifp; - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return (ifp); } @@ -1133,15 +1129,16 @@ vlan_init(void *foo __unused) static int vlan_transmit(struct ifnet *ifp, struct mbuf *m) { + struct epoch_tracker et; struct ifvlan *ifv; struct ifnet *p; int error, len, mcast; - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); ifv = ifp->if_softc; if (TRUNK(ifv) == NULL) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); m_freem(m); return (ENETDOWN); } @@ -1157,14 +1154,14 @@ vlan_transmit(struct ifnet *ifp, struct mbuf *m) */ if (!UP_AND_RUNNING(p)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); m_freem(m); return (ENETDOWN); } if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return (0); } @@ -1178,7 +1175,7 @@ vlan_transmit(struct ifnet *ifp, struct mbuf *m) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); } else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return (error); } @@ -1193,15 +1190,16 @@ vlan_qflush(struct ifnet *ifp __unused) static void vlan_input(struct ifnet *ifp, struct mbuf *m) { + struct epoch_tracker et; struct ifvlantrunk *trunk; struct ifvlan *ifv; struct m_tag *mtag; uint16_t vid, tag; - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); trunk = ifp->if_vlantrunk; if (trunk == NULL) { - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); m_freem(m); return; } @@ -1224,7 +1222,7 @@ vlan_input(struct ifnet *ifp, struct mbuf *m) if (m->m_len < sizeof(*evl) && (m = m_pullup(m, sizeof(*evl))) == NULL) { if_printf(ifp, "cannot pullup VLAN header\n"); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return; } evl = mtod(m, struct ether_vlan_header *); @@ -1247,7 +1245,7 @@ vlan_input(struct ifnet *ifp, struct mbuf *m) __func__, ifp->if_xname, ifp->if_type); #endif if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); m_freem(m); return; } @@ -1257,7 +1255,7 @@ vlan_input(struct ifnet *ifp, struct mbuf *m) ifv = vlan_gethash(trunk, vid); if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); m_freem(m); return; @@ -1277,7 +1275,7 @@ vlan_input(struct ifnet *ifp, struct mbuf *m) sizeof(uint8_t), M_NOWAIT); if (mtag == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); - VLAN_RUNLOCK(); + 
NET_EPOCH_EXIT(et); m_freem(m); return; } @@ -1288,7 +1286,7 @@ vlan_input(struct ifnet *ifp, struct mbuf *m) m->m_pkthdr.rcvif = ifv->ifv_ifp; if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); /* Pass it back through the parent's input routine. */ (*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m); @@ -1314,6 +1312,7 @@ vlan_lladdr_fn(void *arg, int pending __unused) static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid) { + struct epoch_tracker et; struct ifvlantrunk *trunk; struct ifnet *ifp; int error = 0; @@ -1413,9 +1412,9 @@ vlan_config(struct ifvlan *ifv, struct ifnet *p, uint1 ifp->if_link_state = p->if_link_state; - TRUNK_RLOCK(TRUNK(ifv)); + NET_EPOCH_ENTER(et); vlan_capabilities(ifv); - TRUNK_RUNLOCK(TRUNK(ifv)); + NET_EPOCH_EXIT(et); /* * Set up our interface address to reflect the underlying @@ -1587,14 +1586,15 @@ vlan_setflags(struct ifnet *ifp, int status) static void vlan_link_state(struct ifnet *ifp) { + struct epoch_tracker et; struct ifvlantrunk *trunk; struct ifvlan *ifv; /* Called from a taskqueue_swi task, so we cannot sleep. */ - VLAN_RLOCK(); + NET_EPOCH_ENTER(et); trunk = ifp->if_vlantrunk; if (trunk == NULL) { - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); return; } @@ -1605,7 +1605,7 @@ vlan_link_state(struct ifnet *ifp) trunk->parent->if_link_state); } TRUNK_WUNLOCK(trunk); - VLAN_RUNLOCK(); + NET_EPOCH_EXIT(et); } static void @@ -1618,7 +1618,7 @@ vlan_capabilities(struct ifvlan *ifv) u_long hwa = 0; VLAN_SXLOCK_ASSERT(); - TRUNK_RLOCK_ASSERT(TRUNK(ifv)); + NET_EPOCH_ASSERT(); p = PARENT(ifv); ifp = ifv->ifv_ifp; @@ -1710,6 +1710,7 @@ vlan_capabilities(struct ifvlan *ifv) static void vlan_trunk_capabilities(struct ifnet *ifp) { + struct epoch_tracker et; struct ifvlantrunk *trunk; struct ifvlan *ifv; @@ -1719,11 +1720,11 @@ vlan_trunk_capabilities(struct ifnet *ifp) VLAN_SUNLOCK(); return; } - TRUNK_RLOCK(trunk); + NET_EPOCH_ENTER(et); VLAN_FOREACH(ifv, trunk) { vlan_capabilities(ifv); } - TRUNK_RUNLOCK(trunk); + NET_EPOCH_EXIT(et); VLAN_SUNLOCK(); } @@ -1915,9 +1916,11 @@ vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data ifv->ifv_capenable = ifr->ifr_reqcap; trunk = TRUNK(ifv); if (trunk != NULL) { - TRUNK_RLOCK(trunk); + struct epoch_tracker et; + + NET_EPOCH_ENTER(et); vlan_capabilities(ifv); - TRUNK_RUNLOCK(trunk); + NET_EPOCH_EXIT(et); } VLAN_SUNLOCK(); break; Modified: head/sys/net/route.c ============================================================================== --- head/sys/net/route.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/route.c Wed Jan 9 01:11:19 2019 (r342872) @@ -593,11 +593,12 @@ rtredirect_fib(struct sockaddr *dst, int error = 0; short *stat = NULL; struct rt_addrinfo info; + struct epoch_tracker et; struct ifaddr *ifa; struct rib_head *rnh; ifa = NULL; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); rnh = rt_tables_get_rnh(fibnum, dst->sa_family); if (rnh == NULL) { error = EAFNOSUPPORT; @@ -692,7 +693,7 @@ done: if (rt) RTFREE_LOCKED(rt); out: - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); if (error) V_rtstat.rts_badredirect++; else if (stat != NULL) @@ -1279,6 +1280,7 @@ rt_notifydelete(struct rtentry *rt, struct rt_addrinfo int rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum) { + struct epoch_tracker et; struct ifaddr *ifa; int needref, error; @@ -1288,7 +1290,7 @@ rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum) */ error = 0; needref = (info->rti_ifa == NULL); - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); if (info->rti_ifp == NULL && ifpaddr != NULL && ifpaddr->sa_family 
== AF_LINK && (ifa = ifa_ifwithnet(ifpaddr, 0, fibnum)) != NULL) { @@ -1316,7 +1318,7 @@ rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum) ifa_ref(info->rti_ifa); } else error = ENETUNREACH; - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); return (error); } Modified: head/sys/net/rtsock.c ============================================================================== --- head/sys/net/rtsock.c Wed Jan 9 01:09:42 2019 (r342871) +++ head/sys/net/rtsock.c Wed Jan 9 01:11:19 2019 (r342872) @@ -440,6 +440,9 @@ static int rtm_get_jailed(struct rt_addrinfo *info, struct ifnet *ifp, struct rtentry *rt, union sockaddr_union *saun, struct ucred *cred) { +#if defined(INET) || defined(INET6) + struct epoch_tracker et; +#endif /* First, see if the returned address is part of the jail. */ if (prison_if(cred, rt->rt_ifa->ifa_addr) == 0) { @@ -460,7 +463,7 @@ rtm_get_jailed(struct rt_addrinfo *info, struct ifnet * Try to find an address on the given outgoing interface * that belongs to the jail. */ - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa; sa = ifa->ifa_addr; @@ -472,7 +475,7 @@ rtm_get_jailed(struct rt_addrinfo *info, struct ifnet break; } } - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); if (!found) { /* * As a last resort return the 'default' jail address. @@ -502,7 +505,7 @@ rtm_get_jailed(struct rt_addrinfo *info, struct ifnet * Try to find an address on the given outgoing interface * that belongs to the jail. */ - IF_ADDR_RLOCK(ifp); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa; sa = ifa->ifa_addr; @@ -515,7 +518,7 @@ rtm_get_jailed(struct rt_addrinfo *info, struct ifnet break; } } - IF_ADDR_RUNLOCK(ifp); + NET_EPOCH_EXIT(et); if (!found) { /* * As a last resort return the 'default' jail address. @@ -786,16 +789,17 @@ route_output(struct mbuf *m, struct socket *so, ...) if (rt->rt_ifp != NULL && rt->rt_ifp->if_type == IFT_PROPVIRTUAL) { + struct epoch_tracker et; struct ifaddr *ifa; - NET_EPOCH_ENTER(); + NET_EPOCH_ENTER(et); ifa = ifa_ifwithnet(info.rti_info[RTAX_DST], 1, RT_ALL_FIBS); if (ifa != NULL) rt_maskedcopy(ifa->ifa_addr, &laddr, ifa->ifa_netmask); - NET_EPOCH_EXIT(); + NET_EPOCH_EXIT(et); } else rt_maskedcopy(rt->rt_ifa->ifa_addr, &laddr, @@ -1559,7 +1563,7 @@ sysctl_dumpentry(struct radix_node *rn, void *vw) struct rt_addrinfo info; struct sockaddr_storage ss; - IFNET_RLOCK_NOSLEEP_ASSERT(); + NET_EPOCH_ASSERT(); if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg)) return 0; @@ -1753,7 +1757,7 @@ sysctl_iflist(int af, struct walkarg *w) bzero((caddr_t)&info, sizeof(info)); bzero(&ifd, sizeof(ifd)); - NET_EPOCH_ENTER_ET(et); + NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (w->w_arg && w->w_arg != ifp->if_index) continue; @@ -1803,7 +1807,7 @@ sysctl_iflist(int af, struct walkarg *w) info.rti_info[RTAX_BRD] = NULL; } done: - NET_EPOCH_EXIT_ET(et); + NET_EPOCH_EXIT(et); return (error); } @@ -1811,6 +1815,7 @@ static int sysctl_ifmalist(int af, struct walkarg *w) { struct rt_addrinfo info; + struct epoch_tracker et; struct ifaddr *ifa; struct ifmultiaddr *ifma; struct ifnet *ifp; @@ -1819,13 +1824,13 @@ sysctl_ifmalist(int af, struct walkarg *w) error = 0; *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
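==============================================================================
Illustrative sketches (editorial, not part of r342872)

Editorial sketch, not part of the commit: the deleted if_var.h lines above show that the old NET_EPOCH_ENTER() expanded to "struct epoch_tracker nep_et; epoch_enter_preempt(net_epoch_preempt, &nep_et)" and that NET_EPOCH_EXIT() silently referenced that hidden nep_et. The hypothetical fragment below (the function name, flag test and comments are invented for illustration) shows one way the covert variable can pair the wrong tracker once the macro is used in nested scopes, which is the hazard the log message describes.

/*
 * Hypothetical illustration of the hazard of the removed macros.
 * Everything compiles, yet the trackers are paired incorrectly.
 */
static void
old_style_example(struct ifnet *ifp)
{
	NET_EPOCH_ENTER();		/* declares the outer nep_et and enters */

	if (ifp->if_flags & IFF_UP) {
		NET_EPOCH_ENTER();	/* declares a second nep_et that
					 * shadows the outer one in this block */
		/* ... walk an epoch-protected list ... */
	}

	/*
	 * Both exits below expand to epoch_exit_preempt(..., &nep_et) and can
	 * only see the outer nep_et: the outer tracker is exited twice while
	 * the inner tracker is never exited, and the compiler cannot notice.
	 */
	NET_EPOCH_EXIT();
	NET_EPOCH_EXIT();
}

With the new macros the tracker name is part of every call, so the pairing is visible at the call site and mistakes like the above are caught in review or by the compiler.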
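After the conversion every epoch section names its tracker explicitly. The post-commit ifunit(), reproduced here from the if.c hunk above for readability, shows the resulting idiom that replaces IFNET_RLOCK_NOSLEEP()/IFNET_RUNLOCK_NOSLEEP(), IF_ADDR_RLOCK()/IF_ADDR_RUNLOCK() and IF_AFDATA_RLOCK()/IF_AFDATA_RUNLOCK() throughout the patch:

struct ifnet *
ifunit(const char *name)
{
	struct epoch_tracker et;
	struct ifnet *ifp;

	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
			break;
	}
	NET_EPOCH_EXIT(et);
	return (ifp);
}

The corresponding read-side assertions (IFNET_RLOCK_NOSLEEP_ASSERT(), TRUNK_RLOCK_ASSERT(), IF_AFDATA_RLOCK_ASSERT()) collapse into the new NET_EPOCH_ASSERT().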
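The log stresses that the renamed read-side macros no longer exclude their companion write locks: a thread inside the net epoch can run concurrently with a writer holding IF_ADDR_WLOCK(), which per the if_var.h hunk above is still a plain mutex. The sketch below is a minimal, hypothetical reader/writer pair (the helper names and the synchronous NET_EPOCH_WAIT() are illustrative assumptions, not code from this commit) showing what that implies: readers traverse with the CK_STAILQ_* primitives and take references before leaving the epoch, and writers keep removed elements alive until the epoch has drained.

/* Reader: runs inside the net epoch and does not block the writer below. */
static struct ifaddr *
find_af_addr(struct ifnet *ifp, sa_family_t af)		/* hypothetical */
{
	struct epoch_tracker et;
	struct ifaddr *ifa;

	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == af) {
			ifa_ref(ifa);	/* hold a reference before leaving the epoch */
			break;
		}
	}
	NET_EPOCH_EXIT(et);
	return (ifa);			/* NULL if not found */
}

/*
 * Writer: serialized against other writers by the if_addr mutex, but epoch
 * readers may be traversing the list at the same time.
 */
static void
remove_addr(struct ifnet *ifp, struct ifaddr *ifa)	/* hypothetical */
{
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr, ifa_link);
	IF_ADDR_WUNLOCK(ifp);
	NET_EPOCH_WAIT();	/* let in-flight readers drain before the
				 * element's memory may be freed or reused */
	/* ... now drop the final reference / free the element ... */
}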
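The single functional change noted in the log is that ni6_addrs() and ni6_store_addrs() stop entering the epoch recursively. Recursion is permitted for the preemptible net epoch, but the patch consistently prefers having the outermost caller enter once while callees merely assert, as the converted vlan_gethash() and sysctl_dumpentry() do with NET_EPOCH_ASSERT(). A generic sketch of that division of labour, with hypothetical helper names:

/* Callee: requires that the caller already entered the net epoch. */
static int
count_link_addrs(struct ifnet *ifp)		/* hypothetical */
{
	struct ifaddr *ifa;
	int n;

	NET_EPOCH_ASSERT();			/* assert instead of re-entering */
	n = 0;
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		if (ifa->ifa_addr->sa_family == AF_LINK)
			n++;
	return (n);
}

/* Caller: enters the epoch exactly once around all epoch-protected work. */
static int
count_all_link_addrs(void)			/* hypothetical */
{
	struct epoch_tracker et;
	struct ifnet *ifp;
	int n;

	n = 0;
	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		n += count_link_addrs(ifp);
	NET_EPOCH_EXIT(et);
	return (n);
}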