Date: Wed, 14 Sep 2005 19:33:53 GMT
From: Robert Watson <rwatson@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 83611 for review
Message-ID: <200509141933.j8EJXr9c086971@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=83611

Change 83611 by rwatson@rwatson_peppercorn on 2005/09/14 19:33:13

	Un-inline sblock() and sbunlock().  Add sb_lock_owner to socket
	buffer.  Manage sb_lock_owner field in sb_lock(), sblock(),
	sbunlock().  Add sb_lock_assert()'s in sosend(), soreceive(), and
	other spots.

Affected files ...

.. //depot/projects/netsmp/src/sys/kern/uipc_socket.c#5 edit
.. //depot/projects/netsmp/src/sys/kern/uipc_socket2.c#3 edit
.. //depot/projects/netsmp/src/sys/sys/socketvar.h#3 edit

Differences ...

==== //depot/projects/netsmp/src/sys/kern/uipc_socket.c#5 (text+ko) ====

@@ -384,6 +384,7 @@
 	SOCKBUF_LOCK(&so->so_snd);
 	so->so_snd.sb_flags |= SB_NOINTR;
 	(void)sblock(&so->so_snd, M_WAITOK);
+	sb_lock_assert(&so->so_snd);
 	/*
 	 * socantsendmore_locked() drops the socket buffer mutex so that it
 	 * can safely perform wakeups. Re-acquire the mutex before
@@ -655,6 +656,7 @@
 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
 	if (error)
 		goto out_locked;
+	sb_lock_assert(&so->so_snd);
 	do {
 		SOCKBUF_LOCK_ASSERT(&so->so_snd);
 		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
@@ -1005,6 +1007,7 @@
 	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
 	if (error)
 		goto out;
+	sb_lock_assert(&so->so_rcv);
 	m = so->so_rcv.sb_mb;
 
 	/*
@@ -1245,8 +1248,12 @@
 				    disposable);
 			} else
 #endif /* ZERO_COPY_SOCKETS */
+			{
+			sb_lock_assert(&so->so_rcv);
 			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
 			SOCKBUF_LOCK(&so->so_rcv);
+			sb_lock_assert(&so->so_rcv);
+			}
 			if (error)
 				goto release;
 		} else
@@ -1442,6 +1449,7 @@
 	SOCKBUF_LOCK(sb);
 	sb->sb_flags |= SB_NOINTR;
 	(void) sblock(sb, M_WAITOK);
+	sb_lock_assert(sb);
 	/*
 	 * socantrcvmore_locked() drops the socket buffer mutex so that it
 	 * can safely perform wakeups. Re-acquire the mutex before

==== //depot/projects/netsmp/src/sys/kern/uipc_socket2.c#3 (text+ko) ====

@@ -353,6 +353,8 @@
 /*
  * Lock a sockbuf already known to be locked;
  * return any error returned from sleep (EINTR).
+ *
+ * XXXRW: Isn't a bit silly that we repeatedly clear and re-set SB_WANT?
  */
 int
 sb_lock(sb)
@@ -371,10 +373,67 @@
 			return (error);
 	}
 	sb->sb_flags |= SB_LOCK;
+	sb->sb_lock_owner = curthread;
+	return (0);
+}
+
+/*
+ * Un-inlined sblock() macro.
+ */
+int
+sblock(sb, wf)
+	struct sockbuf *sb;
+	int wf;
+{
+
+	SOCKBUF_LOCK_ASSERT(sb);
+
+	if (sb->sb_flags & SB_LOCK) {
+		if (wf == M_WAITOK)
+			return (sb_lock(sb));
+		else
+			return (EWOULDBLOCK);
+	}
+	sb->sb_flags |= SB_LOCK;
+	sb->sb_lock_owner = curthread;
 	return (0);
 }
 
 /*
+ * Un-inlined sbunlock() macro.
+ */
+void
+sbunlock(sb)
+	struct sockbuf *sb;
+{
+
+	SOCKBUF_LOCK_ASSERT(sb);
+	KASSERT(sb->sb_flags & SB_LOCK, ("sbunlock: !SB_LOCK"));
+	KASSERT(sb->sb_lock_owner == curthread, ("sbunlock: !curthread"));
+
+	sb->sb_flags &= ~SB_LOCK;
+	sb->sb_lock_owner = NULL;
+	if (sb->sb_flags & SB_WANT) {
+		sb->sb_flags &= ~SB_WANT;
+		wakeup(&sb->sb_flags);
+	}
+}
+
+
+/*
+ * We can't assert that the current thread owns the socket buffer sleep lock,
+ * but we can at least assert that it is held.
+ */
+void
+sb_lock_assert(sb)
+	struct sockbuf *sb;
+{
+
+	KASSERT(sb->sb_flags & SB_LOCK, ("sb_lock_assert: failed"));
+	KASSERT(sb->sb_lock_owner == curthread, ("sb_lock_assert: !curthread"));
+}
+
+/*
  * Wakeup processes waiting on a socket buffer. Do asynchronous
  * notification via SIGIO if the socket has the SS_ASYNC flag set.
  *

==== //depot/projects/netsmp/src/sys/sys/socketvar.h#3 (text+ko) ====

@@ -111,6 +111,7 @@
 		int	sb_lowat;	/* (c/d) low water mark */
 		int	sb_timeo;	/* (c/d) timeout for read/write */
 		short	sb_flags;	/* (c/d) flags, see below */
+		struct thread	*sb_lock_owner;	/* (c/d), sb_lock owner */
 	} so_rcv, so_snd;
 /*
  * Constants for sb_flags field of struct sockbuf.
@@ -317,25 +318,6 @@
 }
 
 /*
- * Set lock on sockbuf sb; sleep if lock is already held.
- * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
- * Returns error without lock if sleep is interrupted.
- */
-#define sblock(sb, wf) ((sb)->sb_flags & SB_LOCK ? \
-		(((wf) == M_WAITOK) ? sb_lock(sb) : EWOULDBLOCK) : \
-		((sb)->sb_flags |= SB_LOCK), 0)
-
-/* release lock on sockbuf sb */
-#define	sbunlock(sb) do { \
-	SOCKBUF_LOCK_ASSERT(sb); \
-	(sb)->sb_flags &= ~SB_LOCK; \
-	if ((sb)->sb_flags & SB_WANT) { \
-		(sb)->sb_flags &= ~SB_WANT; \
-		wakeup(&(sb)->sb_flags); \
-	} \
-} while (0)
-
-/*
  * soref()/sorele() ref-count the socket structure. Note that you must
  * still explicitly close the socket, but the last ref count will free
  * the structure.
@@ -487,7 +469,10 @@
 	    struct thread *td);
 void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
 int	sbwait(struct sockbuf *sb);
+int	sblock(struct sockbuf *sb, int wf);
 int	sb_lock(struct sockbuf *sb);
+void	sb_lock_assert(struct sockbuf *sb);
+void	sbunlock(struct sockbuf *sb);
 int	soabort(struct socket *so);
 int	soaccept(struct socket *so, struct sockaddr **nam);
 struct socket	*soalloc(int mflags);
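As a reader's aid only, and not part of the change itself, the following is a
minimal sketch of the locking pattern the new sb_lock_assert() calls check,
modelled on the sosend()/soreceive() hunks above: take the sockbuf mutex,
acquire the sleep lock with sblock(), assert ownership before touching the
buffer, and release with sbunlock().  The function example_sb_user() and its
body are hypothetical; sblock(), sb_lock_assert(), sbunlock(), and
SBLOCKWAIT() are as they appear in the diff.

/*
 * Sketch of a hypothetical consumer of the un-inlined sockbuf sleep lock
 * interface declared in socketvar.h above.
 */
static int
example_sb_user(struct socket *so, int flags)
{
	int error;

	SOCKBUF_LOCK(&so->so_rcv);		/* sockbuf mutex */
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));	/* sleep lock */
	if (error) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		return (error);
	}
	sb_lock_assert(&so->so_rcv);		/* curthread now owns SB_LOCK */

	/* ... examine or modify so->so_rcv here ... */

	sbunlock(&so->so_rcv);			/* clear SB_LOCK, wake SB_WANT waiters */
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (0);
}

Recording sb_lock_owner is what lets sbunlock() and sb_lock_assert() verify
that the thread releasing or relying on SB_LOCK is the one that acquired it,
rather than only checking that the flag is set.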