Date:      Thu, 27 Sep 2007 23:15:18 GMT
From:      John Birrell <jb@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 126899 for review
Message-ID:  <200709272315.l8RNFIAc087844@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=126899

Change 126899 by jb@jb_freebsd1 on 2007/09/27 23:14:52

	IF6

Affected files ...

.. //depot/projects/dtrace6/src/lib/libarchive/archive_read.3#2 integrate
.. //depot/projects/dtrace6/src/lib/libarchive/archive_write_disk.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/kern_condvar.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/kern_mutex.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/kern_rwlock.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/kern_sx.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/kern_synch.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/subr_sleepqueue.c#2 integrate
.. //depot/projects/dtrace6/src/sys/kern/subr_turnstile.c#2 integrate
.. //depot/projects/dtrace6/src/sys/sys/proc.h#2 integrate
.. //depot/projects/dtrace6/src/sys/sys/sleepqueue.h#2 integrate
.. //depot/projects/dtrace6/src/sys/sys/turnstile.h#2 integrate
.. //depot/projects/dtrace6/src/usr.bin/systat/pigs.c#2 integrate
.. //depot/projects/dtrace6/src/usr.bin/tar/bsdtar.c#2 integrate

Differences ...

==== //depot/projects/dtrace6/src/lib/libarchive/archive_read.3#2 (text+ko) ====

@@ -22,7 +22,7 @@
 .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 .\" SUCH DAMAGE.
 .\"
-.\" $FreeBSD: src/lib/libarchive/archive_read.3,v 1.20.2.5 2007/07/05 01:56:39 kientzle Exp $
+.\" $FreeBSD: src/lib/libarchive/archive_read.3,v 1.20.2.6 2007/09/27 03:47:26 kientzle Exp $
 .\"
 .Dd August 19, 2006
 .Dt archive_read 3
@@ -267,7 +267,7 @@
 The
 .Va flags
 argument is passed unmodified to
-.Xr archiv_write_disk_set_options 3 .
+.Xr archive_write_disk_set_options 3 .
 .It Fn archive_read_extract_set_progress_callback
 Sets a pointer to a user-defined callback that can be used
 for updating progress displays during extraction.

==== //depot/projects/dtrace6/src/lib/libarchive/archive_write_disk.c#2 (text+ko) ====

@@ -25,7 +25,7 @@
  */
 
 #include "archive_platform.h"
-__FBSDID("$FreeBSD: src/lib/libarchive/archive_write_disk.c,v 1.12.2.3 2007/08/25 04:42:01 kientzle Exp $");
+__FBSDID("$FreeBSD: src/lib/libarchive/archive_write_disk.c,v 1.12.2.4 2007/09/27 03:45:24 kientzle Exp $");
 
 #ifdef HAVE_SYS_TYPES_H
 #include <sys/types.h>
@@ -448,8 +448,10 @@
 
 	__archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
 	    ARCHIVE_STATE_DATA, "archive_write_disk_block");
-	if (a->fd < 0)
-		return (ARCHIVE_OK);
+	if (a->fd < 0) {
+		archive_set_error(&a->archive, 0, "File not open");
+		return (ARCHIVE_WARN);
+	}
 	archive_clear_error(&a->archive);
 
 	/* Seek if necessary to the specified offset. */
@@ -478,12 +480,17 @@
 _archive_write_data(struct archive *_a, const void *buff, size_t size)
 {
 	struct archive_write_disk *a = (struct archive_write_disk *)_a;
+	int r;
+
 	__archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
 	    ARCHIVE_STATE_DATA, "archive_write_data");
 	if (a->fd < 0)
 		return (ARCHIVE_OK);
 
-	return (_archive_write_data_block(_a, buff, size, a->offset));
+	r = _archive_write_data_block(_a, buff, size, a->offset);
+	if (r < ARCHIVE_OK)
+		return (r);
+	return (size);
 }
 
 static int

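The archive_write_disk.c hunks above tighten the write path in two ways:
writing to an entry whose file was never opened now raises ARCHIVE_WARN with
a "File not open" error instead of silently reporting success, and
_archive_write_data() now propagates errors from _archive_write_data_block()
and returns the number of bytes written on success rather than ARCHIVE_OK.
A minimal, illustrative caller against the public libarchive API (the helper
name and buffer size are made up; error handling is abbreviated):

	#include <archive.h>

	/* Copy the current entry's data from a read handle to a
	 * write-to-disk handle, relying on the byte-count return. */
	static int
	copy_entry_data(struct archive *ar, struct archive *aw)
	{
		char buf[8192];
		ssize_t nread;

		for (;;) {
			nread = archive_read_data(ar, buf, sizeof(buf));
			if (nread == 0)
				return (ARCHIVE_OK);	/* end of entry */
			if (nread < 0)
				return (ARCHIVE_WARN);	/* read failed */
			/* A failed write now comes back below ARCHIVE_OK;
			 * success returns the number of bytes written. */
			if (archive_write_data(aw, buf, (size_t)nread) < ARCHIVE_OK)
				return (ARCHIVE_WARN);
		}
	}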
==== //depot/projects/dtrace6/src/sys/kern/kern_condvar.c#2 (text+ko) ====

@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_condvar.c,v 1.52.2.1 2006/02/27 00:19:40 davidxu Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_condvar.c,v 1.52.2.2 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_ktrace.h"
 
@@ -124,7 +124,7 @@
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR, 0);
 	sleepq_wait(cvp);
 
 #ifdef KTRACE
@@ -178,7 +178,7 @@
 	mtx_unlock(mp);
 
 	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
-	    SLEEPQ_INTERRUPTIBLE);
+	    SLEEPQ_INTERRUPTIBLE, 0);
 	rval = sleepq_wait_sig(cvp);
 
 #ifdef KTRACE
@@ -231,7 +231,7 @@
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR, 0);
 	sleepq_set_timeout(cvp, timo);
 	rval = sleepq_timedwait(cvp);
 
@@ -289,7 +289,7 @@
 	mtx_unlock(mp);
 
 	sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
-	    SLEEPQ_INTERRUPTIBLE);
+	    SLEEPQ_INTERRUPTIBLE, 0);
 	sleepq_set_timeout(cvp, timo);
 	rval = sleepq_timedwait_sig(cvp);
 
@@ -318,7 +318,7 @@
 	sleepq_lock(cvp);
 	if (cvp->cv_waiters > 0) {
 		cvp->cv_waiters--;
-		sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
+		sleepq_signal(cvp, SLEEPQ_CONDVAR, -1, 0);
 	} else
 		sleepq_release(cvp);
 }
@@ -334,7 +334,7 @@
 	sleepq_lock(cvp);
 	if (cvp->cv_waiters > 0) {
 		cvp->cv_waiters = 0;
-		sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
+		sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
 	} else
 		sleepq_release(cvp);
 }

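Every condvar path above now passes one extra trailing argument (0) to
sleepq_add(), sleepq_signal() and sleepq_broadcast().  As the
subr_sleepqueue.c hunks further down show, this is a queue index: a wait
channel can now carry several queues of sleepers (NR_SLEEPQS), and condvars,
msleep() and wakeup() all simply use queue 0.  The prototypes as implied by
the call sites in this change (the authoritative declarations are in
sys/sys/sleepqueue.h, which is listed above but not shown here):

	void	sleepq_add(void *wchan, struct mtx *lock, const char *wmesg,
		    int flags, int queue);
	void	sleepq_signal(void *wchan, int flags, int pri, int queue);
	void	sleepq_broadcast(void *wchan, int flags, int pri, int queue);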
==== //depot/projects/dtrace6/src/sys/kern/kern_mutex.c#2 (text+ko) ====

@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_mutex.c,v 1.154.2.9 2006/08/11 18:54:10 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_mutex.c,v 1.154.2.10 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_adaptive_mutexes.h"
 #include "opt_ddb.h"
@@ -576,7 +576,8 @@
 		/*
 		 * Block on the turnstile.
 		 */
-		turnstile_wait(&m->mtx_object, mtx_owner(m));
+		turnstile_wait(&m->mtx_object, mtx_owner(m),
+		    TS_EXCLUSIVE_QUEUE);
 	}
 
 #ifdef KTR
@@ -684,13 +685,13 @@
 #endif
 #ifndef PREEMPTION
 	/* XXX */
-	td1 = turnstile_head(ts);
+	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
 #endif
 #ifdef MUTEX_WAKE_ALL
-	turnstile_broadcast(ts);
+	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 	_release_lock_quick(m);
 #else
-	if (turnstile_signal(ts)) {
+	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
 		_release_lock_quick(m);
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
@@ -701,7 +702,7 @@
 			    m);
 	}
 #endif
-	turnstile_unpend(ts);
+	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 
 #ifndef PREEMPTION
 	/*

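The mutex slow paths now name the turnstile queue explicitly: blocking, head
lookup, signal and broadcast all pass TS_EXCLUSIVE_QUEUE (a mutex only ever
has exclusive waiters), and turnstile_unpend() is told the owner type
(TS_EXCLUSIVE_LOCK here).  The renamed entry points, as implied by these
call sites and the subr_turnstile.c definitions below (sys/sys/turnstile.h
carries the real declarations):

	void	turnstile_wait(struct lock_object *lock, struct thread *owner,
		    int queue);
	int	turnstile_signal(struct turnstile *ts, int queue);
	void	turnstile_broadcast(struct turnstile *ts, int queue);
	void	turnstile_unpend(struct turnstile *ts, int owner_type);
	struct thread *turnstile_head(struct turnstile *ts, int queue);
	int	turnstile_empty(struct turnstile *ts, int queue);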
==== //depot/projects/dtrace6/src/sys/kern/kern_rwlock.c#2 (text+ko) ====

@@ -32,9 +32,10 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.28.2.2 2007/09/05 22:18:39 alfred Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.28.2.4 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_ddb.h"
+#include "opt_no_adaptive_rwlocks.h"
 
 #include <sys/param.h>
 #include <sys/ktr.h>
@@ -44,7 +45,7 @@
 #include <sys/rwlock.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
-#include <sys/lock_profile.h>
+
 #include <machine/cpu.h>
 
 CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
@@ -100,7 +101,6 @@
 void
 rw_init_flags(struct rwlock *rw, const char *name, int opts)
 {
-	struct lock_object *lock;
 	int flags;
 
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
@@ -117,23 +117,17 @@
 
 	rw->rw_lock = RW_UNLOCKED;
 	rw->rw_recurse = 0;
-	lock = &rw->lock_object;
-	lock->lo_class = &lock_class_rw;
-	lock->lo_flags = flags;
-	lock->lo_name = lock->lo_type = name;
-	LOCK_LOG_INIT(lock, opts);
-	WITNESS_INIT(lock);
+	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
 }
 
 void
 rw_destroy(struct rwlock *rw)
 {
 
-	LOCK_LOG_DESTROY(&rw->lock_object, 0);
 	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
 	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
 	rw->rw_lock = RW_DESTROYED;
-	WITNESS_DESTROY(&rw->lock_object);
+	lock_destroy(&rw->lock_object);
 }
 
 void
@@ -291,12 +285,12 @@
 		}
 
 #ifdef ADAPTIVE_RWLOCKS
-		owner = (struct thread *)RW_OWNER(x);
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
+		owner = (struct thread *)RW_OWNER(x);
 		if (TD_IS_RUNNING(owner)) {
 			turnstile_release(&rw->lock_object);
 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -316,7 +310,7 @@
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
 			    rw);
-		turnstile_wait_queue(&rw->lock_object, rw_owner(rw),
+		turnstile_wait(&rw->lock_object, rw_owner(rw),
 		    TS_SHARED_QUEUE);
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
@@ -446,8 +440,8 @@
 		 */
 		ts = turnstile_lookup(&rw->lock_object);
 		MPASS(ts != NULL);
-		turnstile_broadcast_queue(ts, TS_EXCLUSIVE_QUEUE);
-		turnstile_unpend_queue(ts, TS_SHARED_LOCK);
+		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
+		turnstile_unpend(ts, TS_SHARED_LOCK);
 		break;
 	}
 	lock_profile_release_lock(&rw->lock_object);
@@ -461,7 +455,6 @@
 void
 _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 {
-	//struct turnstile *ts;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 #endif
@@ -563,7 +556,7 @@
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
 			    rw);
-		turnstile_wait_queue(&rw->lock_object, rw_owner(rw),
+		turnstile_wait(&rw->lock_object, rw_owner(rw),
 		    TS_EXCLUSIVE_QUEUE);
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
@@ -644,7 +637,7 @@
 		queue = TS_SHARED_QUEUE;
 #ifdef ADAPTIVE_RWLOCKS
 		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
-		    !turnstile_empty_queue(ts, TS_EXCLUSIVE_QUEUE))
+		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
 			v |= RW_LOCK_WRITE_WAITERS;
 #else
 		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
@@ -658,7 +651,7 @@
 	 * wakeup.  If they are all spinning, then we just need to
 	 * disown the turnstile and return.
 	 */
-	if (turnstile_empty_queue(ts, queue)) {
+	if (turnstile_empty(ts, queue)) {
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
 		atomic_store_rel_ptr(&rw->rw_lock, v);
@@ -672,9 +665,9 @@
 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
 		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
 		    queue == TS_SHARED_QUEUE ? "read" : "write");
-	turnstile_broadcast_queue(ts, queue);
+	turnstile_broadcast(ts, queue);
 	atomic_store_rel_ptr(&rw->rw_lock, v);
-	turnstile_unpend_queue(ts, TS_EXCLUSIVE_LOCK);
+	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 }
 
 /*
@@ -793,20 +786,20 @@
 	if (ts == NULL)
 		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
 	else if (v & RW_LOCK_READ_WAITERS &&
-	    turnstile_empty_queue(ts, TS_SHARED_QUEUE))
+	    turnstile_empty(ts, TS_SHARED_QUEUE))
 		v &= ~RW_LOCK_READ_WAITERS;
 	else if (v & RW_LOCK_WRITE_WAITERS &&
-	    turnstile_empty_queue(ts, TS_EXCLUSIVE_QUEUE))
+	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
 		v &= ~RW_LOCK_WRITE_WAITERS;
 #else
 	MPASS(ts != NULL);
 #endif
 	if (v & RW_LOCK_READ_WAITERS)
-		turnstile_broadcast_queue(ts, TS_SHARED_QUEUE);
+		turnstile_broadcast(ts, TS_SHARED_QUEUE);
 	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
 	    (v & RW_LOCK_WRITE_WAITERS));
 	if (v & RW_LOCK_READ_WAITERS) {
-		turnstile_unpend_queue(ts, TS_EXCLUSIVE_LOCK);
+		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 	} else if (ts) {
 		turnstile_disown(ts);
 		turnstile_release(&rw->lock_object);

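Besides switching the contested paths from the interim *_queue names to the
final turnstile API, kern_rwlock.c stops open-coding the lock_object setup
and teardown (lo_class, lo_flags, lo_name, LOCK_LOG_*, WITNESS_INIT/DESTROY)
and uses the common helpers instead; kern_sx.c below gets the same
treatment.  The helper prototypes as implied by the two callers here (both
pass NULL for the lock type):

	void	lock_init(struct lock_object *lock, struct lock_class *class,
		    const char *name, const char *type, int flags);
	void	lock_destroy(struct lock_object *lock);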
==== //depot/projects/dtrace6/src/sys/kern/kern_sx.c#2 (text+ko) ====

@@ -40,7 +40,7 @@
 #include "opt_ddb.h"
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_sx.c,v 1.25.2.7 2007/09/10 23:43:52 alfred Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_sx.c,v 1.25.2.9 2007/09/27 20:24:55 jhb Exp $");
 
 #include <sys/param.h>
 #include <sys/ktr.h>
@@ -137,14 +137,11 @@
 void
 sx_init_flags(struct sx *sx, const char *description, int opts)
 {
-	struct lock_object *lock;
 	int flags;
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
 
-	bzero(sx, sizeof(*sx));
-
 	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
 		flags |= LO_DUPOK;
@@ -156,23 +153,17 @@
 	flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
 	sx->sx_lock = SX_LOCK_UNLOCKED;
 	sx->sx_recurse = 0;
-	lock = &sx->lock_object;
-	lock->lo_class = &lock_class_sx;
-	lock->lo_flags = flags;
-	lock->lo_name = lock->lo_type = description;
-	LOCK_LOG_INIT(lock, opts);
-	WITNESS_INIT(lock);
+	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
 }
 
 void
 sx_destroy(struct sx *sx)
 {
-	LOCK_LOG_DESTROY(&sx->lock_object, 0);
 
 	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
 	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
 	sx->sx_lock = SX_LOCK_DESTROYED;
-	WITNESS_DESTROY(&sx->lock_object);
+	lock_destroy(&sx->lock_object);
 }
 
 int
@@ -377,7 +368,7 @@
 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
 	if (x & SX_LOCK_SHARED_WAITERS)
-		sleepq_broadcast_queue(&sx->lock_object, SLEEPQ_SX, -1,
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
 		    SQ_SHARED_QUEUE);
 	else
 		sleepq_release(&sx->lock_object);
@@ -520,16 +511,14 @@
 		 * lock and the exclusive waiters flag is set, we have
 		 * to sleep.
 		 */
-#if 0
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
 			    __func__, sx);
-#endif
 
 		GIANT_SAVE();
 		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
 		    &waittime);
-		sleepq_add_queue(&sx->lock_object, NULL, sx->lock_object.lo_name,
+		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
 		if (!(opts & SX_INTERRUPTIBLE))
@@ -604,7 +593,7 @@
 		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
 		    "exclusive");
 	atomic_store_rel_ptr(&sx->sx_lock, x);
-	sleepq_broadcast_queue(&sx->lock_object, SLEEPQ_SX, -1, queue);
+	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
 }
 
 /*
@@ -748,7 +737,7 @@
 		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
 		    &waittime);
 #endif
-		sleepq_add_queue(&sx->lock_object, NULL, sx->lock_object.lo_name,
+		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
 		if (!(opts & SX_INTERRUPTIBLE))
@@ -849,7 +838,7 @@
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p waking up all thread on"
 			    "exclusive queue", __func__, sx);
-		sleepq_broadcast_queue(&sx->lock_object, SLEEPQ_SX, -1,
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
 		    SQ_EXCLUSIVE_QUEUE);
 		break;
 	}

==== //depot/projects/dtrace6/src/sys/kern/kern_synch.c#2 (text+ko) ====

@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_synch.c,v 1.270.2.6 2006/07/06 08:32:50 glebius Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_synch.c,v 1.270.2.7 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_ktrace.h"
 
@@ -188,7 +188,7 @@
 	 * stopped, then td will no longer be on a sleep queue upon
 	 * return from cursig().
 	 */
-	sleepq_add(ident, mtx, wmesg, flags);
+	sleepq_add(ident, mtx, wmesg, flags, 0);
 	if (timo)
 		sleepq_set_timeout(ident, timo);
 
@@ -265,7 +265,7 @@
 	/*
 	 * We put ourselves on the sleep queue and start our timeout.
 	 */
-	sleepq_add(ident, mtx, wmesg, SLEEPQ_MSLEEP);
+	sleepq_add(ident, mtx, wmesg, SLEEPQ_MSLEEP, 0);
 	if (timo)
 		sleepq_set_timeout(ident, timo);
 
@@ -314,7 +314,7 @@
 {
 
 	sleepq_lock(ident);
-	sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
+	sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1, 0);
 }
 
 /*
@@ -328,7 +328,7 @@
 {
 
 	sleepq_lock(ident);
-	sleepq_signal(ident, SLEEPQ_MSLEEP, -1);
+	sleepq_signal(ident, SLEEPQ_MSLEEP, -1, 0);
 }
 
 /*

==== //depot/projects/dtrace6/src/sys/kern/subr_sleepqueue.c#2 (text+ko) ====

@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_sleepqueue.c,v 1.18.2.5 2007/09/04 22:40:39 alfred Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_sleepqueue.c,v 1.18.2.8 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_sleepqueue_profiling.h"
 #include "opt_ddb.h"
@@ -70,7 +70,6 @@
 #include <sys/lock.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
-#include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sched.h>
@@ -78,10 +77,6 @@
 #include <sys/sleepqueue.h>
 #include <sys/sysctl.h>
 
-#ifdef DDB
-#include <ddb/ddb.h>
-#endif
-
 #include <vm/uma.h>
 
 #ifdef DDB
@@ -275,7 +270,7 @@
  * woken up.
  */
 void
-sleepq_add_queue(void *wchan, struct mtx *lock, const char *wmesg, int flags,
+sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags,
     int queue)
 {
 	struct sleepqueue_chain *sc;
@@ -632,7 +627,7 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 
 	/* Remove the thread from the queue. */
-	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
+	TAILQ_REMOVE(&sq->sq_blocked[(int)td->td_sqqueue], td, td_slpq);
 
 	/*
 	 * Get a sleep queue for this thread.  If this is the last waiter,
@@ -710,7 +705,7 @@
  * Find the highest priority thread sleeping on a wait channel and resume it.
  */
 void
-sleepq_signal_queue(void *wchan, int flags, int pri, int queue)
+sleepq_signal(void *wchan, int flags, int pri, int queue)
 {
 	struct sleepqueue *sq;
 	struct thread *td, *besttd;
@@ -748,7 +743,7 @@
  * Resume all threads sleeping on a specified wait channel.
  */
 void
-sleepq_broadcast_queue(void *wchan, int flags, int pri, int queue)
+sleepq_broadcast(void *wchan, int flags, int pri, int queue)
 {
 	struct sleepqueue *sq;
 
@@ -910,3 +905,65 @@
 	mtx_lock_spin(&sched_lock);
 }
 
+#ifdef DDB
+DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
+{
+	struct sleepqueue_chain *sc;
+	struct sleepqueue *sq;
+#ifdef INVARIANTS
+	struct lock_object *lock;
+#endif
+	struct thread *td;
+	void *wchan;
+	int i;
+
+	if (!have_addr)
+		return;
+
+	/*
+	 * First, see if there is an active sleep queue for the wait channel
+	 * indicated by the address.
+	 */
+	wchan = (void *)addr;
+	sc = SC_LOOKUP(wchan);
+	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
+		if (sq->sq_wchan == wchan)
+			goto found;
+
+	/*
+	 * Second, see if there is an active sleep queue at the address
+	 * indicated.
+	 */
+	for (i = 0; i < SC_TABLESIZE; i++)
+		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
+			if (sq == (struct sleepqueue *)addr)
+				goto found;
+		}
+
+	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
+	return;
+found:
+	db_printf("Wait channel: %p\n", sq->sq_wchan);
+#ifdef INVARIANTS
+	db_printf("Queue type: %d\n", sq->sq_type);
+	if (sq->sq_lock) {
+		lock = &sq->sq_lock->mtx_object;
+		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
+		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
+	}
+#endif
+	db_printf("Blocked threads:\n");
+	for (i = 0; i < NR_SLEEPQS; i++) {
+		db_printf("\nQueue[%d]:\n", i);
+		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
+			db_printf("\tempty\n");
+		else
+			TAILQ_FOREACH(td, &sq->sq_blocked[0],
+				      td_slpq) {
+				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
+					  td->td_tid, td->td_proc->p_pid,
+					  td->td_proc->p_comm);
+			}
+	}
+}
+#endif

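Besides renaming sleepq_add_queue(), sleepq_signal_queue() and
sleepq_broadcast_queue() back to their unsuffixed names, subr_sleepqueue.c
gains a DDB command for inspecting sleep queues from the in-kernel
debugger.  It accepts either a wait channel address or the address of a
struct sleepqueue itself and dumps the blocked threads per queue; from the
ddb(4) prompt the invocation is simply:

	db> show sleepq <addr>

where <addr> is a placeholder for whatever wait channel or sleepqueue
pointer is being chased, and the output fields follow the db_printf() calls
above.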
==== //depot/projects/dtrace6/src/sys/kern/subr_turnstile.c#2 (text+ko) ====

@@ -46,18 +46,18 @@
  * chain.  Each chain contains a spin mutex that protects all of the
  * turnstiles in the chain.
  *
- * Each time a thread is created, a turnstile is malloc'd and attached to
- * that thread.  When a thread blocks on a lock, if it is the first thread
- * to block, it lends its turnstile to the lock.  If the lock already has
- * a turnstile, then it gives its turnstile to the lock's turnstile's free
- * list.  When a thread is woken up, it takes a turnstile from the free list
- * if there are any other waiters.  If it is the only thread blocked on the
- * lock, then it reclaims the turnstile associated with the lock and removes
- * it from the hash table.
+ * Each time a thread is created, a turnstile is allocated from a UMA zone
+ * and attached to that thread.  When a thread blocks on a lock, if it is the
+ * first thread to block, it lends its turnstile to the lock.  If the lock
+ * already has a turnstile, then it gives its turnstile to the lock's
+ * turnstile's free list.  When a thread is woken up, it takes a turnstile from
+ * the free list if there are any other waiters.  If it is the only thread
+ * blocked on the lock, then it reclaims the turnstile associated with the lock
+ * and removes it from the hash table.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_turnstile.c,v 1.152.2.7 2007/09/05 17:38:28 alfred Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_turnstile.c,v 1.152.2.11 2007/09/27 20:24:55 jhb Exp $");
 
 #include "opt_ddb.h"
 #include "opt_turnstile_profiling.h"
@@ -67,7 +67,6 @@
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
-#include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
@@ -75,6 +74,8 @@
 #include <sys/sysctl.h>
 #include <sys/turnstile.h>
 
+#include <vm/uma.h>
+
 #ifdef DDB
 #include <sys/kdb.h>
 #include <ddb/ddb.h>
@@ -103,8 +104,9 @@
  * when it is attached to a lock.  The second list to use ts_hash is the
  * free list hung off of a turnstile that is attached to a lock.
  *
- * Each turnstile contains two lists of threads.  The ts_blocked list is
- * a linked list of threads blocked on the turnstile's lock.  The
+ * Each turnstile contains three lists of threads.  The two ts_blocked lists
+ * are linked list of threads blocked on the turnstile's lock.  One list is
+ * for exclusive waiters, and the other is for shared waiters.  The
  * ts_pending list is a linked list of threads previously awakened by
  * turnstile_signal() or turnstile_wait() that are waiting to be put on
  * the run queue.
@@ -114,9 +116,8 @@
  *  q - td_contested lock
  */
 struct turnstile {
-	/* struct mtx ts_lock;	*/		/* Spin lock for self. */
-	TAILQ_HEAD(, thread) ts_blocked[2];	/* (c + q) Blocked threads. */
-	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
+	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
+	struct threadqueue ts_pending;		/* (c) Pending threads. */
 	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
 	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
 	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
@@ -143,15 +144,8 @@
 #endif
 static struct mtx td_contested_lock;
 static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
-
-/* XXX: stats, remove me */
-static u_int turnstile_nullowners;
-SYSCTL_UINT(_debug, OID_AUTO, turnstile_nullowners, CTLFLAG_RD,
-    &turnstile_nullowners, 0, "called with null owner on a shared queue");
+static uma_zone_t turnstile_zone;
 
-
-static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");
-
 /*
  * Prototypes for non-exported routines.
  */
@@ -162,7 +156,12 @@
 static void	propagate_priority(struct thread *td);
 static int	turnstile_adjust_thread(struct turnstile *ts,
 		    struct thread *td);
+static struct thread *turnstile_first_waiter(struct turnstile *ts);
 static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
+#ifdef INVARIANTS
+static void	turnstile_dtor(void *mem, int size, void *arg);
+#endif
+static int	turnstile_init(void *mem, int size, int flags);
 
 /*
  * Walks the chain of turnstiles and their owners to propagate the priority
@@ -184,9 +183,8 @@
 
 		if (td == NULL) {
 			/*
-			 * This really isn't quite right. Really
-			 * ought to bump priority of thread that
-			 * next acquires the lock.
+			 * This might be a read lock with no owner.  There's
+			 * not much we can do, so just bail.
 			 */
 			return;
 		}
@@ -385,6 +383,12 @@
 init_turnstile0(void *dummy)
 {
 
+	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
+#ifdef INVARIANTS
+	    NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
+#else
+	    NULL, NULL, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
+#endif
 	thread0.td_turnstile = turnstile_alloc();
 }
 SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
@@ -443,6 +447,8 @@
 
 	mtx_assert(&td_contested_lock, MA_OWNED);
 	MPASS(ts->ts_owner == NULL);
+
+	/* A shared lock might not have an owner. */
 	if (owner == NULL)
 		return;
 
@@ -451,20 +457,47 @@
 	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
 }
 
+#ifdef INVARIANTS
+/*
+ * UMA zone item deallocator.
+ */
+static void
+turnstile_dtor(void *mem, int size, void *arg)
+{
+	struct turnstile *ts;
+
+	ts = mem;
+	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
+	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
+	MPASS(TAILQ_EMPTY(&ts->ts_pending));
+}
+#endif
+
 /*
- * Malloc a turnstile for a new thread, initialize it and return it.
+ * UMA zone item initializer.
  */
-struct turnstile *
-turnstile_alloc(void)
+static int
+turnstile_init(void *mem, int size, int flags)
 {
 	struct turnstile *ts;
 
-	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
+	bzero(mem, size);
+	ts = mem;
 	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
 	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
 	TAILQ_INIT(&ts->ts_pending);
 	LIST_INIT(&ts->ts_free);
-	return (ts);
+	return (0);
+}
+
+/*
+ * Get a turnstile for a new thread.
+ */
+struct turnstile *
+turnstile_alloc(void)
+{
+
+	return (uma_zalloc(turnstile_zone, M_WAITOK));
 }
 
 /*
@@ -474,11 +507,7 @@
 turnstile_free(struct turnstile *ts)
 {
 
-	MPASS(ts != NULL);
-	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
-	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
-	MPASS(TAILQ_EMPTY(&ts->ts_pending));
-	free(ts, M_TURNSTILE);
+	uma_zfree(turnstile_zone, ts);
 }
 
 /*
@@ -582,7 +611,7 @@
  * turnstile chain locked and will return with it unlocked.
  */
 void
-turnstile_wait_queue(struct lock_object *lock, struct thread *owner, int queue)
+turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
 {
 	struct turnstile_chain *tc;
 	struct turnstile *ts;
@@ -592,12 +621,10 @@
 	tc = TC_LOOKUP(lock);
 	mtx_assert(&tc->tc_lock, MA_OWNED);
 	MPASS(td->td_turnstile != NULL);
+	if (queue == TS_SHARED_QUEUE)
+		MPASS(owner != NULL);
 	if (owner)
 		MPASS(owner->td_proc->p_magic == P_MAGIC);
-	/* XXX: stats, remove me */
-	if (!owner && queue == TS_SHARED_QUEUE) {
-		turnstile_nullowners++;
-	}
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
 
 	/* Look up the turnstile associated with the lock 'lock'. */
@@ -642,10 +669,10 @@
 			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
 		else
 			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
+		MPASS(owner == ts->ts_owner);
 		mtx_unlock_spin(&td_contested_lock);
 		MPASS(td->td_turnstile != NULL);
 		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
-		MPASS(owner == ts->ts_owner);
 	}
 	td->td_turnstile = NULL;
 	mtx_unlock_spin(&tc->tc_lock);
@@ -680,6 +707,7 @@
 #endif
 
 	/* Save who we are blocked on and switch. */
+	td->td_tsqueue = queue;
 	td->td_blocked = ts;
 	td->td_lockname = lock->lo_name;
 	TD_SET_LOCK(td);
@@ -705,7 +733,7 @@
  * pending list.  This must be called with the turnstile chain locked.
  */
 int
-turnstile_signal_queue(struct turnstile *ts, int queue)
+turnstile_signal(struct turnstile *ts, int queue)
 {
 	struct turnstile_chain *tc;
 	struct thread *td;
@@ -713,11 +741,10 @@
 
 	MPASS(ts != NULL);
 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
-	MPASS(ts->ts_owner == curthread);
+	MPASS(ts->ts_owner == curthread ||
+	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
 	tc = TC_LOOKUP(ts->ts_lockobj);
 	mtx_assert(&tc->tc_lock, MA_OWNED);
-	MPASS(ts->ts_owner == curthread ||
-	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
 
 	/*
@@ -757,7 +784,7 @@
  * the turnstile chain locked.
  */
 void
-turnstile_broadcast_queue(struct turnstile *ts, int queue)
+turnstile_broadcast(struct turnstile *ts, int queue)
 {
 	struct turnstile_chain *tc;
 	struct turnstile *ts1;
@@ -766,7 +793,7 @@
 	MPASS(ts != NULL);
 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
 	MPASS(ts->ts_owner == curthread ||
-		    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
+	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
 	tc = TC_LOOKUP(ts->ts_lockobj);
 	mtx_assert(&tc->tc_lock, MA_OWNED);
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
@@ -780,7 +807,7 @@
 
 	/*
 	 * Give a turnstile to each thread.  The last thread gets
-	 * this turnstile.
+	 * this turnstile if the turnstile is empty.
 	 */
 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
 		if (LIST_EMPTY(&ts->ts_free)) {
@@ -803,11 +830,10 @@
  * chain locked.
  */
 void
-turnstile_unpend_queue(struct turnstile *ts, int owner_type)
+turnstile_unpend(struct turnstile *ts, int owner_type)
 {
 	TAILQ_HEAD( ,thread) pending_threads;
 	struct turnstile_chain *tc;
-	struct turnstile *nts;
 	struct thread *td;
 	u_char cp, pri;
 
@@ -834,12 +860,15 @@
 	 * Remove the turnstile from this thread's list of contested locks
 	 * since this thread doesn't own it anymore.  New threads will
 	 * not be blocking on the turnstile until it is claimed by a new
-	 * owner.
+	 * owner.  There might not be a current owner if this is a shared
+	 * lock.
 	 */
-	mtx_lock_spin(&td_contested_lock);
-	ts->ts_owner = NULL;
-	LIST_REMOVE(ts, ts_link);
-	mtx_unlock_spin(&td_contested_lock);
+	if (ts->ts_owner != NULL) {
+		mtx_lock_spin(&td_contested_lock);
+		ts->ts_owner = NULL;
+		LIST_REMOVE(ts, ts_link);
+		mtx_unlock_spin(&td_contested_lock);
+	}
 	critical_enter();
 	mtx_unlock_spin(&tc->tc_lock);
 
@@ -852,8 +881,8 @@
 	pri = PRI_MAX;
 	mtx_lock_spin(&sched_lock);
 	mtx_lock_spin(&td_contested_lock);
-	LIST_FOREACH(nts, &td->td_contested, ts_link) {
-		cp = turnstile_first_waiter(nts)->td_priority;
+	LIST_FOREACH(ts, &td->td_contested, ts_link) {
+		cp = turnstile_first_waiter(ts)->td_priority;
 		if (cp < pri)
 			pri = cp;
 	}
@@ -874,6 +903,9 @@
 		if (TD_ON_LOCK(td)) {
 			td->td_blocked = NULL;
 			td->td_lockname = NULL;
+#ifdef INVARIANTS
+			td->td_tsqueue = 0xff;

>>> TRUNCATED FOR MAIL (1000 lines) <<<


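Although the rest of the subr_turnstile.c diff is cut off by the mail
truncation above, the visible hunks already show the allocator change:
turnstiles move from malloc(9) with M_TURNSTILE to a dedicated UMA zone,
with the queue initialization done once in the zone init routine and the
empty-queue sanity checks moved into an INVARIANTS-only destructor.  The
resulting allocation pattern, taken from the hunks above:

	/* Created once at SI_SUB_LOCK time from init_turnstile0(); the
	 * dtor argument is only passed under INVARIANTS. */
	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
	    NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);

	/* Per-thread allocate/free, replacing malloc()/free(): */
	ts = uma_zalloc(turnstile_zone, M_WAITOK);
	uma_zfree(turnstile_zone, ts);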
