Date:      Mon, 21 Jun 2004 00:59:15 GMT
From:      Robert Watson <rwatson@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 55426 for review
Message-ID:  <200406210059.i5L0xF7f030948@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=55426

Change 55426 by rwatson@rwatson_paprika on 2004/06/21 00:59:02

	Integrate netperf_socket:
	
	- Loop back a number of locking annotations from rwatson_netperf.
	- Loop back a conversion of if->panic to KASSERT.
	- Loop back locking of SO_LINGER socket option.
	- Loop back removal of Giant from ip_mroute.c code around sockets.
	- Loop back assertion of inpcb lock for MAC in tcp_input().
	- Loop back SOCK_LOCK(so) around so_state frobbing in UNIX domain    
	  sockets.
	- Loop back locking of socket locks in uipc_rcvd().
	- Loop back conditional CALLOUT_MPSAFE for TCP timers.
	- Loop back soabort() locking and queueing changes in SPX.
	- Loop back annotation of NET_{LOCK,UNLOCK}_GIANT() in mutex.h.
	- Loop back socket buffer locking reformulations of socket buffer
	  wakeup, release, flush, cant{send,rcv}, append, and more (see
	  the sketch after this summary).
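
A minimal standalone sketch of the locked/unlocked wrapper convention
that most of the socket buffer changes below follow: a _locked variant
that requires the caller to hold the buffer mutex, and a plain variant
that is just an acquire/call/release wrapper.  This is userland C with
pthreads standing in for the sockbuf mutex; the names (struct buf,
buf_drop, buf_drop_locked) are illustrative only, not kernel API.

#include <pthread.h>
#include <stdio.h>

struct buf {
	pthread_mutex_t	mtx;	/* plays the role of the sockbuf mutex */
	int		cc;	/* plays the role of sb_cc */
};

/*
 * Caller must already hold b->mtx; the kernel version asserts this
 * with SOCKBUF_LOCK_ASSERT().  Analogous to sbdrop_locked().
 */
static void
buf_drop_locked(struct buf *b, int len)
{
	if (len > b->cc)
		len = b->cc;
	b->cc -= len;
}

/* Thin unlocked wrapper, analogous to sbdrop(). */
static void
buf_drop(struct buf *b, int len)
{
	pthread_mutex_lock(&b->mtx);
	buf_drop_locked(b, len);
	pthread_mutex_unlock(&b->mtx);
}

int
main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 10 };

	buf_drop(&b, 4);		/* wrapper takes and drops the lock */
	pthread_mutex_lock(&b.mtx);
	buf_drop_locked(&b, 2);		/* caller already holds the lock */
	pthread_mutex_unlock(&b.mtx);
	printf("cc=%d\n", b.cc);	/* prints cc=4 */
	return (0);
}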

Affected files ...

.. //depot/projects/netperf_socket/sys/amd64/amd64/pmap.c#16 integrate
.. //depot/projects/netperf_socket/sys/conf/ldscript.ia64#2 integrate
.. //depot/projects/netperf_socket/sys/i386/i386/pmap.c#12 integrate
.. //depot/projects/netperf_socket/sys/kern/kern_proc.c#11 integrate
.. //depot/projects/netperf_socket/sys/kern/sys_socket.c#9 integrate
.. //depot/projects/netperf_socket/sys/kern/uipc_socket.c#21 integrate
.. //depot/projects/netperf_socket/sys/kern/uipc_socket2.c#18 integrate
.. //depot/projects/netperf_socket/sys/kern/uipc_usrreq.c#15 integrate
.. //depot/projects/netperf_socket/sys/kern/vfs_mount.c#8 integrate
.. //depot/projects/netperf_socket/sys/modules/zlib/Makefile#1 branch
.. //depot/projects/netperf_socket/sys/net/zlib.c#2 integrate
.. //depot/projects/netperf_socket/sys/netgraph/ng_fec.c#4 integrate
.. //depot/projects/netperf_socket/sys/netinet/ip_mroute.c#6 integrate
.. //depot/projects/netperf_socket/sys/netinet/tcp_input.c#11 integrate
.. //depot/projects/netperf_socket/sys/netinet/tcp_subr.c#9 integrate
.. //depot/projects/netperf_socket/sys/netipx/spx_usrreq.c#6 integrate
.. //depot/projects/netperf_socket/sys/netkey/keysock.c#2 integrate
.. //depot/projects/netperf_socket/sys/sys/mutex.h#6 integrate
.. //depot/projects/netperf_socket/sys/sys/socketvar.h#17 integrate
.. //depot/projects/netperf_socket/sys/sys/user.h#7 integrate

Differences ...

==== //depot/projects/netperf_socket/sys/amd64/amd64/pmap.c#16 (text+ko) ====

@@ -75,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.477 2004/06/20 06:10:59 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.478 2004/06/20 20:57:05 alc Exp $");
 
 /*
  *	Manages physical address maps.
@@ -2461,7 +2461,7 @@
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
 		pte = vtopte(pv->pv_va);
 #else
-		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+		pte = pmap_pte(pmap, pv->pv_va);
 #endif
 		tpte = *pte;
 
@@ -2487,7 +2487,7 @@
 		KASSERT(m < &vm_page_array[vm_page_array_size],
 			("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
 
-		pv->pv_pmap->pm_stats.resident_count--;
+		pmap->pm_stats.resident_count--;
 
 		pte_clear(pte);
 
@@ -2499,15 +2499,14 @@
 		}
 
 		npv = TAILQ_NEXT(pv, pv_plist);
-		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 
 		m->md.pv_list_count--;
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+		if (TAILQ_EMPTY(&m->md.pv_list))
 			vm_page_flag_clear(m, PG_WRITEABLE);
-		}
 
-		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+		pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem);
 		free_pv_entry(pv);
 	}
 	pmap_invalidate_all(pmap);
@@ -2525,9 +2524,11 @@
 {
 	pv_entry_t pv;
 	pt_entry_t *pte;
+	boolean_t rv;
 
+	rv = FALSE;
 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
+		return (rv);
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2546,13 +2547,12 @@
 #endif
 		PMAP_LOCK(pv->pv_pmap);
 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
-		if (*pte & PG_M) {
-			PMAP_UNLOCK(pv->pv_pmap);
-			return TRUE;
-		}
+		rv = (*pte & PG_M) != 0;
 		PMAP_UNLOCK(pv->pv_pmap);
+		if (rv)
+			break;
 	}
-	return (FALSE);
+	return (rv);
 }
 
 /*

==== //depot/projects/netperf_socket/sys/conf/ldscript.ia64#2 (text+ko) ====

@@ -1,4 +1,4 @@
-/* $FreeBSD: src/sys/conf/ldscript.ia64,v 1.10 2003/09/06 05:15:36 marcel Exp $ */
+/* $FreeBSD: src/sys/conf/ldscript.ia64,v 1.11 2004/06/20 22:32:19 marcel Exp $ */
 OUTPUT_FORMAT("elf64-ia64-little", "elf64-ia64-little", "elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(__start)
@@ -74,6 +74,8 @@
     *(.dtors)
     *(SORT(.dtors.*))
   }
+  . = ALIGN(16);
+  __gp = . + 0x200000;
   .got            : { *(.got.plt) *(.got) }
   .IA_64.pltoff   : { *(.IA_64.pltoff) }
   /* We want the small data sections together, so single-instruction offsets

==== //depot/projects/netperf_socket/sys/i386/i386/pmap.c#12 (text+ko) ====

@@ -73,7 +73,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/i386/i386/pmap.c,v 1.475 2004/06/20 06:11:00 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/i386/i386/pmap.c,v 1.476 2004/06/20 20:57:06 alc Exp $");
 
 /*
  *	Manages physical address maps.
@@ -2533,7 +2533,7 @@
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
 		pte = vtopte(pv->pv_va);
 #else
-		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
+		pte = pmap_pte_quick(pmap, pv->pv_va);
 #endif
 		tpte = *pte;
 
@@ -2559,7 +2559,7 @@
 		KASSERT(m < &vm_page_array[vm_page_array_size],
 			("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
 
-		pv->pv_pmap->pm_stats.resident_count--;
+		pmap->pm_stats.resident_count--;
 
 		pte_clear(pte);
 
@@ -2571,15 +2571,14 @@
 		}
 
 		npv = TAILQ_NEXT(pv, pv_plist);
-		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 
 		m->md.pv_list_count--;
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+		if (TAILQ_EMPTY(&m->md.pv_list))
 			vm_page_flag_clear(m, PG_WRITEABLE);
-		}
 
-		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+		pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem);
 		free_pv_entry(pv);
 	}
 	sched_unpin();
@@ -2598,9 +2597,11 @@
 {
 	pv_entry_t pv;
 	pt_entry_t *pte;
+	boolean_t rv;
 
+	rv = FALSE;
 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
+		return (rv);
 
 	sched_pin();
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2620,15 +2621,13 @@
 #endif
 		PMAP_LOCK(pv->pv_pmap);
 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
-		if (*pte & PG_M) {
-			sched_unpin();
-			PMAP_UNLOCK(pv->pv_pmap);
-			return TRUE;
-		}
+		rv = (*pte & PG_M) != 0;
 		PMAP_UNLOCK(pv->pv_pmap);
+		if (rv)
+			break;
 	}
 	sched_unpin();
-	return (FALSE);
+	return (rv);
 }
 
 /*

==== //depot/projects/netperf_socket/sys/kern/kern_proc.c#11 (text+ko) ====

@@ -27,11 +27,11 @@
  * SUCH DAMAGE.
  *
  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
- * $FreeBSD: src/sys/kern/kern_proc.c,v 1.210 2004/06/20 02:03:33 gad Exp $
+ * $FreeBSD: src/sys/kern/kern_proc.c,v 1.211 2004/06/20 22:17:22 gad Exp $
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_proc.c,v 1.210 2004/06/20 02:03:33 gad Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_proc.c,v 1.211 2004/06/20 22:17:22 gad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_kstack_pages.h"
@@ -763,6 +763,8 @@
 		kp->ki_lastcpu = td->td_lastcpu;
 		kp->ki_oncpu = td->td_oncpu;
 		kp->ki_tdflags = td->td_flags;
+		kp->ki_tid = td->td_tid;
+		kp->ki_numthreads = p->p_numthreads;
 		kp->ki_pcb = td->td_pcb;
 		kp->ki_kstack = (void *)td->td_kstack;
 		kp->ki_pctcpu = sched_pctcpu(td);

==== //depot/projects/netperf_socket/sys/kern/sys_socket.c#9 (text+ko) ====

@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/sys_socket.c,v 1.60 2004/06/17 22:48:09 rwatson Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/sys_socket.c,v 1.61 2004/06/20 17:35:50 rwatson Exp $");
 
 #include "opt_mac.h"
 
@@ -170,6 +170,7 @@
 		return (0);
 
 	case FIONREAD:
+		/* Unlocked read. */
 		*(int *)data = so->so_rcv.sb_cc;
 		return (0);
 
@@ -188,6 +189,7 @@
 		return (0);
 
 	case SIOCATMARK:
+		/* Unlocked read. */
 		*(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
 		return (0);
 	}
@@ -229,7 +231,11 @@
 	/*
 	 * If SBS_CANTRCVMORE is set, but there's still data left in the
 	 * receive buffer, the socket is still readable.
+	 *
+	 * XXXRW: perhaps should lock socket buffer so st_size result
+	 * is consistent.
 	 */
+	/* Unlocked read. */
 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 ||
 	    so->so_rcv.sb_cc != 0)
 		ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;

==== //depot/projects/netperf_socket/sys/kern/uipc_socket.c#21 (text+ko) ====

@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/uipc_socket.c,v 1.182 2004/06/19 03:23:14 rwatson Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/uipc_socket.c,v 1.186 2004/06/21 00:20:42 rwatson Exp $");
 
 #include "opt_inet.h"
 #include "opt_mac.h"
@@ -265,6 +265,11 @@
 {
 	int s, error;
 
+	/*
+	 * XXXRW: Ordering issue here -- perhaps we need to set
+	 * SO_ACCEPTCONN before the call to pru_listen()?
+	 * XXXRW: General atomic test-and-set concerns here also.
+	 */
 	s = splnet();
 	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
 			    SS_ISDISCONNECTING)) {
@@ -343,9 +348,15 @@
 	SOCKBUF_LOCK(&so->so_snd);
 	so->so_snd.sb_flags |= SB_NOINTR;
 	(void)sblock(&so->so_snd, M_WAITOK);
+	/*
+	 * socantsendmore_locked() drops the socket buffer mutex so that it
+	 * can safely perform wakeups.  Re-acquire the mutex before
+	 * continuing.
+	 */
 	socantsendmore_locked(so);
+	SOCKBUF_LOCK(&so->so_snd);
 	sbunlock(&so->so_snd);
-	sbrelease(&so->so_snd, so);
+	sbrelease_locked(&so->so_snd, so);
 	SOCKBUF_UNLOCK(&so->so_snd);
 	sorflush(so);
 	sodealloc(so);
@@ -425,8 +436,7 @@
 	}
 discard:
 	SOCK_LOCK(so);
-	if (so->so_state & SS_NOFDREF)
-		panic("soclose: NOFDREF");
+	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
 	so->so_state |= SS_NOFDREF;
 	sorele(so);
 	splx(s);
@@ -1216,7 +1226,7 @@
 		flags |= MSG_TRUNC;
 		if ((flags & MSG_PEEK) == 0) {
 			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
-			(void) sbdroprecord(&so->so_rcv);
+			(void) sbdroprecord_locked(&so->so_rcv);
 		}
 	}
 	if ((flags & MSG_PEEK) == 0) {
@@ -1281,23 +1291,41 @@
 	struct protosw *pr = so->so_proto;
 	struct sockbuf asb;
 
+	/*
+	 * XXXRW: This is quite ugly.  The existing code made a copy of the
+	 * socket buffer, then zero'd the original to clear the buffer
+	 * fields.  However, with mutexes in the socket buffer, this causes
+	 * problems.  We only clear the zeroable bits of the original;
+	 * however, we have to initialize and destroy the mutex in the copy
+	 * so that dom_dispose() and sbrelease() can lock t as needed.
+	 */
 	SOCKBUF_LOCK(sb);
 	sb->sb_flags |= SB_NOINTR;
 	(void) sblock(sb, M_WAITOK);
+	/*
+	 * socantrcvmore_locked() drops the socket buffer mutex so that it
+	 * can safely perform wakeups.  Re-acquire the mutex before
+	 * continuing.
+	 */
 	socantrcvmore_locked(so);
+	SOCKBUF_LOCK(sb);
 	sbunlock(sb);
-	asb = *sb;
 	/*
 	 * Invalidate/clear most of the sockbuf structure, but leave
 	 * selinfo and mutex data unchanged.
 	 */
+	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
+	bcopy(&sb->sb_startzero, &asb.sb_startzero,
+	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
 	bzero(&sb->sb_startzero,
 	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
 	SOCKBUF_UNLOCK(sb);
 
+	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
 	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
 		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
 	sbrelease(&asb, so);
+	SOCKBUF_LOCK_DESTROY(&asb);
 }
 
 #ifdef INET
@@ -1652,8 +1680,15 @@
 #endif
 
 		case SO_LINGER:
+			/*
+			 * XXXRW: We grab the lock here to get a consistent
+			 * snapshot of both fields.  This may not really
+			 * be necessary.
+			 */
+			SOCK_LOCK(so);
 			l.l_onoff = so->so_options & SO_LINGER;
 			l.l_linger = so->so_linger;
+			SOCK_UNLOCK(so);
 			error = sooptcopyout(sopt, &l, sizeof l);
 			break;
 

==== //depot/projects/netperf_socket/sys/kern/uipc_socket2.c#18 (text+ko) ====

@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/uipc_socket2.c,v 1.133 2004/06/19 03:23:14 rwatson Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/uipc_socket2.c,v 1.134 2004/06/21 00:20:42 rwatson Exp $");
 
 #include "opt_mac.h"
 #include "opt_param.h"
@@ -199,9 +199,9 @@
 	SOCKBUF_UNLOCK(&so->so_rcv);
 	SOCKBUF_LOCK(&so->so_snd);
 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
+	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
 	SOCKBUF_UNLOCK(&so->so_snd);
 	wakeup(&so->so_timeo);
-	sbdrop(&so->so_snd, so->so_snd.sb_cc);
 	sowwakeup(so);
 	sorwakeup(so);
 }
@@ -299,14 +299,38 @@
  * protocol when it detects that the peer will send no more data.
  * Data queued for reading in the socket may yet be read.
  */
+void
+socantsendmore_locked(so)
+	struct socket *so;
+{
+
+	SOCKBUF_LOCK_ASSERT(&so->so_snd);
+
+	so->so_snd.sb_state |= SBS_CANTSENDMORE;
+	sowwakeup_locked(so);
+	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
+}
 
 void
 socantsendmore(so)
 	struct socket *so;
 {
 
-	so->so_snd.sb_state |= SBS_CANTSENDMORE;
-	sowwakeup(so);
+	SOCKBUF_LOCK(&so->so_snd);
+	socantsendmore_locked(so);
+	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
+}
+
+void
+socantrcvmore_locked(so)
+	struct socket *so;
+{
+
+	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+
+	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
+	sorwakeup_locked(so);
+	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
 }
 
 void
@@ -324,8 +348,9 @@
 	struct socket *so;
 {
 
-	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
-	sorwakeup(so);
+	SOCKBUF_LOCK(&so->so_rcv);
+	socantrcvmore_locked(so);
+	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
 }
 
 void
@@ -380,29 +405,16 @@
 }
 
 /*
- * The part of sowakeup that must be done while
- * holding the sockbuf lock.
- */
-static __inline void
-sowakeup_under_lock(struct socket *so, struct sockbuf *sb)
-{
-	SOCKBUF_LOCK_ASSERT(sb);
-
-	selwakeuppri(&sb->sb_sel, PSOCK);
-	sb->sb_flags &= ~SB_SEL;
-	if (sb->sb_flags & SB_WAIT) {
-		sb->sb_flags &= ~SB_WAIT;
-		wakeup(&sb->sb_cc);
-	}
-}
-
-/*
- * Wakeup processes waiting on a socket buffer.
- * Do asynchronous notification via SIGIO
- * if the socket has the SS_ASYNC flag set.
+ * Wakeup processes waiting on a socket buffer.  Do asynchronous
+ * notification via SIGIO if the socket has the SS_ASYNC flag set.
  *
- * The caller is assumed to hold the necessary
- * sockbuf lock.
+ * Called with the socket buffer lock held; will release the lock by the end
+ * of the function.  This allows the caller to acquire the socket buffer lock
+ * while testing for the need for various sorts of wakeup and hold it through
+ * to the point where it's no longer required.  We currently hold the lock
+ * through calls out to other subsystems (with the exception of kqueue), and
+ * then release it to avoid lock order issues.  It's not clear that's
+ * correct.
  */
 void
 sowakeup_locked(so, sb)
@@ -434,17 +446,23 @@
 	register struct sockbuf *sb;
 {
 
-	SOCKBUF_LOCK(sb);
-	sowakeup_under_lock(so, sb);
+	SOCKBUF_LOCK_ASSERT(sb);
+
+	selwakeuppri(&sb->sb_sel, PSOCK);
+	sb->sb_flags &= ~SB_SEL;
+	if (sb->sb_flags & SB_WAIT) {
+		sb->sb_flags &= ~SB_WAIT;
+		wakeup(&sb->sb_cc);
+	}
+	KNOTE(&sb->sb_sel.si_note, 0);
 	SOCKBUF_UNLOCK(sb);
-
 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
 		pgsigio(&so->so_sigio, SIGIO, 0);
 	if (sb->sb_flags & SB_UPCALL)
 		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
 	if (sb->sb_flags & SB_AIO)		/* XXX locking */
 		aio_swake(so, sb);
-	KNOTE(&sb->sb_sel.si_note, 0);		/* XXX locking? */
+	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
 }
 
 /*
@@ -566,17 +584,29 @@
  * Free mbufs held by a socket, and reserved mbuf space.
  */
 void
-sbrelease(sb, so)
+sbrelease_locked(sb, so)
 	struct sockbuf *sb;
 	struct socket *so;
 {
 
-	sbflush(sb);
+	SOCKBUF_LOCK_ASSERT(sb);
+
+	sbflush_locked(sb);
 	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
 	    RLIM_INFINITY);
 	sb->sb_mbmax = 0;
 }
 
+void
+sbrelease(sb, so)
+	struct sockbuf *sb;
+	struct socket *so;
+{
+
+	SOCKBUF_LOCK(sb);
+	sbrelease_locked(sb, so);
+	SOCKBUF_UNLOCK(sb);
+}
 /*
  * Routines to add and remove
  * data from an mbuf queue.
@@ -653,6 +683,7 @@
 #endif /* SOCKBUF_DEBUG */
 
 #define SBLINKRECORD(sb, m0) do {					\
+	SOCKBUF_LOCK_ASSERT(sb);					\
 	if ((sb)->sb_lastrecord != NULL)				\
 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
 	else								\
@@ -673,11 +704,11 @@
 {
 	register struct mbuf *n;
 
+	SOCKBUF_LOCK_ASSERT(sb);
+
 	if (m == 0)
 		return;
 
-	SOCKBUF_LOCK_ASSERT(sb);
-
 	SBLASTRECORDCHK(sb);
 	n = sb->sb_mb;
 	if (n) {
@@ -725,12 +756,10 @@
 	struct sockbuf *sb;
 	struct mbuf *m;
 {
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		sbappend_locked(sb, m);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		sbappend_locked(sb, m);
+
+	SOCKBUF_LOCK(sb);
+	sbappend_locked(sb, m);
+	SOCKBUF_UNLOCK(sb);
 }
 
 /*
@@ -762,12 +791,10 @@
 void
 sbappendstream(struct sockbuf *sb, struct mbuf *m)
 {
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		sbappendstream_locked(sb, m);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		sbappendstream_locked(sb, m);
+
+	SOCKBUF_LOCK(sb);
+	sbappendstream_locked(sb, m);
+	SOCKBUF_UNLOCK(sb);
 }
 
 #ifdef SOCKBUF_DEBUG
@@ -846,12 +873,10 @@
 	register struct sockbuf *sb;
 	register struct mbuf *m0;
 {
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		sbappendrecord_locked(sb, m0);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		sbappendrecord_locked(sb, m0);
+
+	SOCKBUF_LOCK(sb);
+	sbappendrecord_locked(sb, m0);
+	SOCKBUF_UNLOCK(sb);
 }
 
 /*
@@ -912,12 +937,10 @@
 	register struct sockbuf *sb;
 	register struct mbuf *m0;
 {
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		sbinsertoob_locked(sb, m0);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		sbinsertoob_locked(sb, m0);
+
+	SOCKBUF_LOCK(sb);
+	sbinsertoob_locked(sb, m0);
+	SOCKBUF_UNLOCK(sb);
 }
 
 /*
@@ -938,7 +961,7 @@
 	SOCKBUF_LOCK_ASSERT(sb);
 
 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
-		panic("sbappendaddr");
+		panic("sbappendaddr_locked");
 	if (m0)
 		space += m0->m_pkthdr.len;
 	space += m_length(control, &n);
@@ -980,17 +1003,14 @@
 int
 sbappendaddr(sb, asa, m0, control)
 	struct sockbuf *sb;
-	struct sockaddr *asa;
+	const struct sockaddr *asa;
 	struct mbuf *m0, *control;
 {
 	int retval;
 
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		retval = sbappendaddr_locked(sb, asa, m0, control);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		retval = sbappendaddr_locked(sb, asa, m0, control);
+	SOCKBUF_LOCK(sb);
+	retval = sbappendaddr_locked(sb, asa, m0, control);
+	SOCKBUF_UNLOCK(sb);
 	return (retval);
 }
 
@@ -1005,7 +1025,7 @@
 	SOCKBUF_LOCK_ASSERT(sb);
 
 	if (control == 0)
-		panic("sbappendcontrol");
+		panic("sbappendcontrol_locked");
 	space = m_length(control, &n) + m_length(m0, NULL);
 
 	if (space > sbspace(sb))
@@ -1034,12 +1054,9 @@
 {
 	int retval;
 
-	if (!SOCKBUF_OWNED(sb)) {
-		SOCKBUF_LOCK(sb);
-		retval = sbappendcontrol(sb, m0, control);
-		SOCKBUF_UNLOCK(sb);
-	} else
-		retval = sbappendcontrol(sb, m0, control);
+	SOCKBUF_LOCK(sb);
+	retval = sbappendcontrol_locked(sb, m0, control);
+	SOCKBUF_UNLOCK(sb);
 	return (retval);
 }
 
@@ -1110,12 +1127,14 @@
  * Check that all resources are reclaimed.
  */
 void
-sbflush(sb)
+sbflush_locked(sb)
 	register struct sockbuf *sb;
 {
 
+	SOCKBUF_LOCK_ASSERT(sb);
+
 	if (sb->sb_flags & SB_LOCK)
-		panic("sbflush: locked");
+		panic("sbflush_locked: locked");
 	while (sb->sb_mbcnt) {
 		/*
 		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
@@ -1123,18 +1142,27 @@
 		 */
 		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
 			break;
-		sbdrop(sb, (int)sb->sb_cc);
+		sbdrop_locked(sb, (int)sb->sb_cc);
 	}
 	if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
-		panic("sbflush: cc %u || mb %p || mbcnt %u",
-			sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
+		panic("sbflush_locked: cc %u || mb %p || mbcnt %u", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
+}
+
+void
+sbflush(sb)
+	register struct sockbuf *sb;
+{
+
+	SOCKBUF_LOCK(sb);
+	sbflush_locked(sb);
+	SOCKBUF_UNLOCK(sb);
 }
 
 /*
  * Drop data from (the front of) a sockbuf.
  */
 void
-sbdrop(sb, len)
+sbdrop_locked(sb, len)
 	register struct sockbuf *sb;
 	register int len;
 {
@@ -1145,6 +1173,8 @@
 	if (need_lock)
 		SOCKBUF_LOCK(sb);
 
+	SOCKBUF_LOCK_ASSERT(sb);
+
 	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
 	while (len > 0) {
 		if (m == 0) {
@@ -1194,11 +1224,25 @@
 }
 
 /*
+ * Drop data from (the front of) a sockbuf.
+ */
+void
+sbdrop(sb, len)
+	register struct sockbuf *sb;
+	register int len;
+{
+
+	SOCKBUF_LOCK(sb);
+	sbdrop_locked(sb, len);
+	SOCKBUF_UNLOCK(sb);
+}
+
+/*
  * Drop a record off the front of a sockbuf
  * and move the next record to the front.
  */
 void
-sbdroprecord(sb)
+sbdroprecord_locked(sb)
 	register struct sockbuf *sb;
 {
 	register struct mbuf *m;
@@ -1207,6 +1251,8 @@
 	if (need_lock)
 		SOCKBUF_LOCK(sb);
 
+	SOCKBUF_LOCK_ASSERT(sb);
+
 	m = sb->sb_mb;
 	if (m) {
 		sb->sb_mb = m->m_nextpkt;
@@ -1222,6 +1268,20 @@
 }
 
 /*
+ * Drop a record off the front of a sockbuf
+ * and move the next record to the front.
+ */
+void
+sbdroprecord(sb)
+	register struct sockbuf *sb;
+{
+
+	SOCKBUF_LOCK(sb);
+	sbdroprecord_locked(sb);
+	SOCKBUF_UNLOCK(sb);
+}
+
+/*
  * Create a "control" mbuf containing the specified data
  * with the specified type for presentation on a socket buffer.
  */

==== //depot/projects/netperf_socket/sys/kern/uipc_usrreq.c#15 (text+ko) ====

@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.126 2004/06/17 17:16:49 phk Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.129 2004/06/21 00:20:42 rwatson Exp $");
 
 #include "opt_mac.h"
 
@@ -288,7 +288,6 @@
 			break;
 		}
 		so2 = unp->unp_conn->unp_socket;
-		/* NB: careful of order here */
 		SOCKBUF_LOCK(&so2->so_snd);
 		SOCKBUF_LOCK(&so->so_rcv);
 		/*
@@ -302,8 +301,9 @@
 		(void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
 		    newhiwat, RLIM_INFINITY);
 		unp->unp_cc = so->so_rcv.sb_cc;
-		sowwakeup_locked(so2);
+		SOCKBUF_UNLOCK(&so->so_rcv);
 		SOCKBUF_UNLOCK(&so2->so_snd);
+		sowwakeup(so2);
 		break;
 
 	default:
@@ -371,10 +371,12 @@
 			from = &sun_noname;
 		SOCKBUF_LOCK(&so2->so_rcv);
 		if (sbappendaddr_locked(&so2->so_rcv, from, m, control)) {
-			sorwakeup_locked(so2);
+			SOCKBUF_UNLOCK(&so2->so_rcv);
+			sorwakeup(so2);
 			m = NULL;
 			control = NULL;
 		} else {
+			SOCKBUF_UNLOCK(&so2->so_rcv);
 			error = ENOBUFS;
 		}
 		SOCKBUF_UNLOCK(&so2->so_rcv);
@@ -429,8 +431,8 @@
 		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
 		    newhiwat, RLIM_INFINITY);
 		unp->unp_conn->unp_cc = so2->so_rcv.sb_cc;
-		sorwakeup_locked(so2);
 		SOCKBUF_UNLOCK(&so2->so_rcv);
+		sorwakeup(so2);
 		m = NULL;
 		break;
 
@@ -933,6 +935,7 @@
 	struct unpcb *unp;
 {
 	register struct unpcb *unp2 = unp->unp_conn;
+	struct socket *so;
 
 	UNP_LOCK_ASSERT();
 
@@ -943,7 +946,10 @@
 
 	case SOCK_DGRAM:
 		LIST_REMOVE(unp, unp_reflink);
-		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
+		so = unp->unp_socket;
+		SOCK_LOCK(so);
+		so->so_state &= ~SS_ISCONNECTED;
+		SOCK_UNLOCK(so);
 		break;
 
 	case SOCK_STREAM:

==== //depot/projects/netperf_socket/sys/kern/vfs_mount.c#8 (text+ko) ====

@@ -59,7 +59,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/vfs_mount.c,v 1.128 2004/06/17 21:24:13 phk Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/vfs_mount.c,v 1.129 2004/06/20 17:31:01 tmm Exp $");
 
 #include <sys/param.h>
 #include <sys/conf.h>
@@ -1488,6 +1488,7 @@
 		    UIO_SYSSPACE, cp, curthread);
 		nid.ni_startdir = vroot;
 		nid.ni_pathlen = strlen(cp);
+		nid.ni_cnd.cn_cred = curthread->td_ucred;
 		nid.ni_cnd.cn_nameptr = cp;
 
 		error = lookup(&nid);

==== //depot/projects/netperf_socket/sys/net/zlib.c#2 (text+ko) ====

@@ -10,7 +10,7 @@
  * - added inflateIncomp and deflateOutputPending
  * - allow strm->next_out to be NULL, meaning discard the output
  *
- * $FreeBSD: src/sys/net/zlib.c,v 1.17 2003/02/02 13:52:24 alfred Exp $
+ * $FreeBSD: src/sys/net/zlib.c,v 1.18 2004/06/20 17:42:34 markm Exp $
  */
 
 /* 
@@ -57,6 +57,8 @@
 #include <sys/time.h>
 #include <sys/systm.h>
 #include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
 #  define HAVE_MEMCPY
 
 #else
@@ -5380,3 +5382,25 @@
     return (s2 << 16) | s1;
 }
 /* --- adler32.c */
+
+#ifdef _KERNEL
+static int
+zlib_modevent(module_t mod, int type, void *unused)
+{
+	switch (type) {
+	case MOD_LOAD:
+		return 0;
+	case MOD_UNLOAD:
+		return 0;
+	}
+	return EINVAL;
+}
+
+static moduledata_t zlib_mod = {
+	"zlib",
+	zlib_modevent,
+	0
+};
+DECLARE_MODULE(zlib, zlib_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(zlib, 1);
+#endif /* _KERNEL */

==== //depot/projects/netperf_socket/sys/netgraph/ng_fec.c#4 (text+ko) ====

@@ -32,7 +32,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/netgraph/ng_fec.c,v 1.9 2004/05/29 00:51:11 julian Exp $
+ * $FreeBSD: src/sys/netgraph/ng_fec.c,v 1.11 2004/06/20 21:08:58 wpaul Exp $
  */
 /*
  * Copyright (c) 1996-1999 Whistle Communications, Inc.
@@ -125,7 +125,15 @@
 #include <netgraph/ng_parse.h>
 #include <netgraph/ng_fec.h>

>>> TRUNCATED FOR MAIL (1000 lines) <<<


