Date:      Sun, 30 Jan 2005 07:47:04 GMT
From:      David Xu <davidxu@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 69959 for review
Message-ID:  <200501300747.j0U7l4cm096938@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=69959

Change 69959 by davidxu@davidxu_tiger on 2005/01/30 07:46:17

	use the umtx wrapper functions (_thr_umtx_*) instead of the raw umtx_* interfaces

Affected files ...

.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_autoinit.c#4 delete
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_barrier.c#4 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cancel.c#8 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cond.c#12 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_exit.c#7 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_fork.c#10 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_join.c#5 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#14 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kill.c#4 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_list.c#4 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_mutex.c#16 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_once.c#4 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_private.h#19 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_pspinlock.c#9 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_resume_np.c#5 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_sem.c#11 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_sig.c#7 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_spinlock.c#9 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_suspend_np.c#5 edit

Differences ...

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_barrier.c#4 (text+ko) ====

@@ -28,9 +28,8 @@
 
 #include <errno.h>
 #include <stdlib.h>
-#include "namespace.h"
 #include <pthread.h>
-#include "un-namespace.h"
+
 #include "thr_private.h"
 
 __weak_reference(_pthread_barrier_init,		pthread_barrier_init);
@@ -66,7 +65,7 @@
 	if (bar == NULL)
 		return (ENOMEM);
 
-	umtx_init(&bar->b_lock);
+	_thr_umtx_init(&bar->b_lock);
 	bar->b_cycle	= 0;
 	bar->b_waiters	= 0;
 	bar->b_count	= count;
@@ -92,14 +91,14 @@
 		/* Current thread is lastest thread */
 		bar->b_waiters = 0;
 		bar->b_cycle++;
-		umtx_wake((struct umtx *)&bar->b_cycle, INT_MAX);
+		_thr_umtx_wake(&bar->b_cycle, INT_MAX);
 		THR_UMTX_UNLOCK(curthread, &bar->b_lock);
 		ret = PTHREAD_BARRIER_SERIAL_THREAD;
 	} else {
 		cycle = bar->b_cycle;
 		THR_UMTX_UNLOCK(curthread, &bar->b_lock);
 		do {
-			umtx_wait((struct umtx *)&bar->b_cycle, cycle);
+			_thr_umtx_wait(&bar->b_cycle, cycle, NULL);
 			/* test cycle to avoid bogus wakeup */
 		} while (cycle == bar->b_cycle);
 		ret = 0;
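
The hunks above (and those that follow) replace direct umtx_*() calls from
<sys/umtx.h> with the _thr_umtx_*() wrappers from thr_umtx.h.  thr_umtx.h is
not part of this diff, so the prototypes below are only a sketch reconstructed
from the call sites in this change; the umtx_t typedef and the exact
signatures are assumptions, not the authoritative declarations.

/*
 * Assumed wrapper interface (illustrative sketch, inferred from call sites):
 * a lock or wait channel is a single umtx_t word, zero meaning "unlocked".
 */
typedef long umtx_t;				/* assumption */

void	_thr_umtx_init(volatile umtx_t *mtx);	/* *mtx = 0 */
int	_thr_umtx_trylock(volatile umtx_t *mtx, long id);
int	_thr_umtx_lock(volatile umtx_t *mtx, long id);
int	_thr_umtx_timedlock(volatile umtx_t *mtx, long id,
	    const struct timespec *timeout);
int	_thr_umtx_unlock(volatile umtx_t *mtx, long id);
int	_thr_umtx_wait(volatile umtx_t *mtx, umtx_t expected,
	    const struct timespec *timeout);	/* NULL timeout: wait forever */
int	_thr_umtx_wake(volatile umtx_t *mtx, int nr_wakeup);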

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cancel.c#8 (text+ko) ====

@@ -63,7 +63,7 @@
 	} while (!atomic_cmpset_acq_int(&pthread->cancelflags, oldval, newval));
 
 	if (!(oldval & THR_CANCEL_NEEDED) && SHOULD_ASYNC_CANCEL(newval))
-		thr_kill(pthread->tid, SIGCANCEL);
+		_thr_send_sig(pthread, SIGCANCEL);
 
 	_thr_ref_delete(curthread, pthread);
 	_pthread_setcanceltype(oldtype, NULL);

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cond.c#12 (text+ko) ====

@@ -30,7 +30,7 @@
 #include <errno.h>
 #include <string.h>
 #include <pthread.h>
-#include <sys/limits.h>
+#include <limits.h>
 
 #include "thr_private.h"
 
@@ -68,11 +68,17 @@
 		/*
 		 * Initialise the condition variable structure:
 		 */
-		umtx_init(&pcond->c_lock);
+		_thr_umtx_init(&pcond->c_lock);
 		pcond->c_seqno = 0;
 		pcond->c_waiters = 0;
 		pcond->c_wakeups = 0;
-		pcond->c_flags = 0;
+		if (cond_attr == NULL || *cond_attr == NULL) {
+			pcond->c_pshared = 0;
+			pcond->c_clockid = CLOCK_REALTIME;
+		} else {
+			pcond->c_pshared = (*cond_attr)->c_pshared;
+			pcond->c_clockid = (*cond_attr)->c_clockid;
+		}
 		*cond = pcond;
 	}
 	/* Return the completion status: */
@@ -162,7 +168,7 @@
 	if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
 		if (cv->c_waiters > 0) {
 			cv->c_seqno++;
-			umtx_wake((struct umtx *)&cv->c_seqno, 1);
+			_thr_umtx_wake(&cv->c_seqno, 1);
 		} else
 			cv->c_wakeups--;
 	} else {
@@ -178,6 +184,7 @@
 	const struct timespec *abstime, int cancel)
 {
 	struct pthread	*curthread = _get_curthread();
+	struct timespec ts, ts2, *tsp;
 	struct cond_cancel_info cci;
 	pthread_cond_t  cv;
 	long		seq, oldseq;
@@ -206,26 +213,30 @@
 
 	cv->c_waiters++;
 	do {
+		THR_LOCK_RELEASE(curthread, &cv->c_lock);
+
+		if (abstime != NULL) {
+			clock_gettime(cv->c_clockid, &ts);
+			TIMESPEC_SUB(&ts2, abstime, &ts);
+			tsp = &ts2;
+		} else
+			tsp = NULL;
+
 		if (cancel) {
 			THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
-			THR_LOCK_RELEASE(curthread, &cv->c_lock);
 			oldcancel = _thr_cancel_enter(curthread);
-			ret = umtx_timedwait((struct umtx *)&cv->c_seqno,
-				seq, abstime);
+			ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
 			_thr_cancel_leave(curthread, oldcancel);
 			THR_CLEANUP_POP(curthread, 0);
 		} else {
-			THR_LOCK_RELEASE(curthread, &cv->c_lock);
-			ret = umtx_timedwait((struct umtx *)&cv->c_seqno,
-				seq, abstime);
+			ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
 		}
+
 		THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
 		seq = cv->c_seqno;
-		if (abstime != NULL && ret != 0) {
-			if (ret == EINTR)
-				ret = ETIMEDOUT;
+		if (abstime != NULL && ret == ETIMEDOUT)
 			break;
-		}
+
 		/*
 		 * loop if we have never been told to wake up
 		 * or we lost a race.
@@ -307,12 +318,12 @@
 			cv->c_wakeups++;
 			cv->c_waiters--;
 			cv->c_seqno++;
-			umtx_wake((struct umtx *)&cv->c_seqno, 1);
+			_thr_umtx_wake(&cv->c_seqno, 1);
 		} else {
 			cv->c_wakeups += cv->c_waiters;
 			cv->c_waiters = 0;
 			cv->c_seqno++;
-			umtx_wake((struct umtx *)&cv->c_seqno, INT_MAX);
+			_thr_umtx_wake(&cv->c_seqno, INT_MAX);
 		}
 	}
 	THR_LOCK_RELEASE(curthread, &cv->c_lock);
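
The condition-variable wait (and the mutex paths further down) now turns the
caller's absolute timeout into a relative one before sleeping: read the
current time with clock_gettime() on the condvar's clock, subtract it from
abstime, and hand the remainder to _thr_umtx_wait().  TIMESPEC_SUB is defined
elsewhere in libthread's headers and is not shown in this diff; the version
below is only a sketch of the usual shape of such a macro.

/*
 * Assumed shape of TIMESPEC_SUB (not part of this change):
 * dst = src - val, normalizing tv_nsec into [0, 1000000000).
 */
#define	TIMESPEC_SUB(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec;	\
		if ((dst)->tv_nsec < 0) {				\
			(dst)->tv_sec--;				\
			(dst)->tv_nsec += 1000000000;			\
		}							\
	} while (0)

An already-expired abstime simply yields a non-positive interval, which the
wrapper presumably reports as ETIMEDOUT (the loop above only checks for that
error).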

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_exit.c#7 (text+ko) ====

@@ -129,9 +129,8 @@
 		THR_GCLIST_ADD(curthread);
 	curthread->state = PS_DEAD;
 	THREAD_LIST_UNLOCK(curthread);
-	if (curthread->joiner) {
-		umtx_wake((struct umtx *)&curthread->state, INT_MAX);
-	}
+	if (curthread->joiner)
+		_thr_umtx_wake(&curthread->state, INT_MAX);
 	thr_exit(&curthread->terminated);
 	PANIC("thr_exit() returned");
 	/* Never reach! */

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_fork.c#10 (text+ko) ====

@@ -106,13 +106,15 @@
 pid_t
 _fork(void)
 {
-	static long inprogress, waiters;
+	static umtx_t inprogress;
+	static int waiters;
+	umtx_t tmp;
 
 	struct pthread *curthread;
 	struct pthread_atfork *af;
 	pid_t ret;
-	long tmp;
-	int errsave, unlock_malloc;
+	int errsave;
+	int unlock_malloc;
 
 	if (!_thr_is_inited())
 		return (__sys_fork());
@@ -129,7 +131,7 @@
 	while (tmp) {
 		waiters++;
 		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
-		umtx_wait((struct umtx *)&inprogress, tmp);
+		_thr_umtx_wait(&inprogress, tmp, NULL);
 		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
 		waiters--;
 		tmp = inprogress;
@@ -169,11 +171,12 @@
 		 */
 		curthread->tlflags &= ~(TLFLAGS_IN_TDLIST | TLFLAGS_DETACHED);
 
+		/* child is a new kernel thread. */
 		thr_self(&curthread->tid);
 
 		/* clear other threads locked us. */
-		umtx_init(&curthread->lock);
-		umtx_init(&_thr_atfork_lock);
+		_thr_umtx_init(&curthread->lock);
+		_thr_umtx_init(&_thr_atfork_lock);
 		_thr_setthreaded(0);
 
 		/* reinitialize libc spinlocks, this includes __malloc_lock. */
@@ -210,7 +213,7 @@
 		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
 		inprogress = 0;
 		if (waiters)
-			umtx_wake((struct umtx *)&inprogress, waiters);
+			_thr_umtx_wake(&inprogress, waiters);
 		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
 	}
 	errno = errsave;

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_join.c#5 (text+ko) ====

@@ -84,7 +84,7 @@
 	oldcancel = _thr_cancel_enter(curthread);
 
 	while ((state = pthread->state) != PS_DEAD) {
-		umtx_wait((struct umtx *)&pthread->state, state);
+		_thr_umtx_wait(&pthread->state, state, NULL);
 	}
 
 	_thr_cancel_leave(curthread, oldcancel);

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#14 (text+ko) ====

@@ -87,6 +87,12 @@
 		__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
 }
 
+int
+_thr_send_sig(struct pthread *thread, int sig)
+{
+	return thr_kill(thread->tid, sig);
+}
+
 void
 _thr_assert_lock_level()
 {

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kill.c#4 (text+ko) ====

@@ -56,7 +56,7 @@
 	else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
 	    == 0) {
 		if (sig > 0)
-			thr_kill(pthread->tid, sig);
+			_thr_send_sig(pthread, sig);
 		_thr_ref_delete(curthread, pthread);
 	}
 

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_list.c#4 (text+ko) ====

@@ -58,8 +58,8 @@
  * after a fork().
  */
 static TAILQ_HEAD(, pthread)	free_threadq;
-static struct umtx		free_thread_lock;
-static struct umtx		tcb_lock;
+static umtx_t			free_thread_lock;
+static umtx_t			tcb_lock;
 static int			free_thread_count = 0;
 static int			inited = 0;
 static u_int64_t		next_uniqueid = 1;
@@ -69,7 +69,7 @@
 static struct thread_hash_head	thr_hashtable[HASH_QUEUES];
 #define	THREAD_HASH(thrd)	(((unsigned long)thrd >> 12) % HASH_QUEUES)
 
-static void	thr_destroy(struct pthread *curthread, struct pthread *thread);
+static void thr_destroy(struct pthread *curthread, struct pthread *thread);
 
 void
 _thr_list_init(void)
@@ -77,11 +77,11 @@
 	int i;
 
 	_gc_count = 0;
-	umtx_init(&_thr_list_lock);
+	_thr_umtx_init(&_thr_list_lock);
 	TAILQ_INIT(&_thread_list);
 	TAILQ_INIT(&free_threadq);
-	umtx_init(&free_thread_lock);
-	umtx_init(&tcb_lock);
+	_thr_umtx_init(&free_thread_lock);
+	_thr_umtx_init(&tcb_lock);
 	if (inited) {
 		for (i = 0; i < HASH_QUEUES; ++i)
 			LIST_INIT(&thr_hashtable[i]);
@@ -92,7 +92,6 @@
 void
 _thr_gc(struct pthread *curthread)
 {
-	return;
 	struct pthread *td, *td_next;
 	TAILQ_HEAD(, pthread) worklist;
 

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_mutex.c#16 (text+ko) ====

@@ -74,16 +74,18 @@
  * Prototypes
  */
 static long		mutex_handoff(struct pthread *, struct pthread_mutex *);
-static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
-static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t,
+static int		mutex_self_trylock(struct pthread *, pthread_mutex_t);
+static int		mutex_self_lock(struct pthread *, pthread_mutex_t,
 				const struct timespec *abstime);
 static int		mutex_unlock_common(pthread_mutex_t *, int);
 static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
 static void		mutex_rescan_owned (struct pthread *, struct pthread *,
 			    struct pthread_mutex *);
-static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
-static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
-static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
+#if 0
+static pthread_t	mutex_queue_deq(pthread_mutex_t);
+#endif
+static void		mutex_queue_remove(pthread_mutex_t, pthread_t);
+static void		mutex_queue_enq(pthread_mutex_t, pthread_t);
 
 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
@@ -141,7 +143,7 @@
 		    malloc(sizeof(struct pthread_mutex))) == NULL) {
 			ret = ENOMEM;
 		} else {
-			umtx_init(&pmutex->m_lock);
+			_thr_umtx_init(&pmutex->m_lock);
 			/* Set the mutex flags: */
 			pmutex->m_flags = flags;
 
@@ -244,7 +246,7 @@
 int
 _mutex_reinit(pthread_mutex_t *mutex)
 {
-	umtx_init(&(*mutex)->m_lock);
+	_thr_umtx_init(&(*mutex)->m_lock);
 	TAILQ_INIT(&(*mutex)->m_queue);
 	MUTEX_INIT_LINK(*mutex);
 	(*mutex)->m_owner = NULL;
@@ -260,15 +262,14 @@
 {
 	struct pthread_mutex *m;
 
-	/* After fork, tid was changed, fix ownership. */
 	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
-		m->m_lock.u_owner = (void *)curthread->tid;
+		m->m_lock = (umtx_t)curthread->tid;
 	}
 
 	/* Clear contender for priority mutexes */
 	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
 		/* clear another thread locked us */
-		umtx_init(&m->m_lock);
+		_thr_umtx_init(&m->m_lock);
 		TAILQ_INIT(&m->m_queue);
 	}
 }
@@ -276,7 +277,7 @@
 int
 _pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
-	struct pthread	*curthread = _get_curthread();
+	struct pthread *curthread = _get_curthread();
 	pthread_mutex_t m;
 	int ret = 0;
 
@@ -287,7 +288,7 @@
 		 * Try to lock the mutex structure, we only need to
 		 * try once, if failed, the mutex is in used.
 		 */
-		ret = umtx_trylock(&(*mutex)->m_lock, curthread->tid);
+		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
 		if (ret)
 			return (ret);
 
@@ -299,7 +300,7 @@
 		if (((*mutex)->m_owner != NULL) ||
 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
 		    ((*mutex)->m_refcount != 0)) {
-			umtx_unlock(&(*mutex)->m_lock, curthread->tid);
+			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
 			ret = EBUSY;
 		} else {
 			/*
@@ -310,7 +311,7 @@
 			*mutex = NULL;
 
 			/* Unlock the mutex structure: */
-			umtx_unlock(&m->m_lock, curthread->tid);
+			_thr_umtx_unlock(&m->m_lock, curthread->tid);
 
 			/*
 			 * Free the memory allocated for the mutex
@@ -335,14 +336,14 @@
 
 	/* Short cut for simple mutex. */
 	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
-		ret = umtx_trylock(&(*mutex)->m_lock, curthread->tid);
+		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
 		if (ret == 0) {
 			(*mutex)->m_owner = curthread;
 			/* Add to the list of owned mutexes: */
 			MUTEX_ASSERT_NOT_OWNED(*mutex);
 			TAILQ_INSERT_TAIL(&curthread->mutexq,
 			    (*mutex), m_qe);
-		} else if (umtx_owner(&(*mutex)->m_lock) == curthread->tid) {
+		} else if ((*mutex)->m_owner == curthread) {
 			ret = mutex_self_trylock(curthread, *mutex);
 		} /* else {} */
 
@@ -487,6 +488,7 @@
 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
 	const struct timespec * abstime)
 {
+	struct  timespec ts, ts2;
 	long	cycle;
 	int	ret = 0;
 
@@ -501,22 +503,24 @@
 
 	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
 		/* Default POSIX mutex: */
-		ret = umtx_trylock(&(*m)->m_lock, curthread->tid);
+		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
 		if (ret == 0) {
 			(*m)->m_owner = curthread;
 			/* Add to the list of owned mutexes: */
 			MUTEX_ASSERT_NOT_OWNED(*m);
 			TAILQ_INSERT_TAIL(&curthread->mutexq,
 			    (*m), m_qe);
-		} else if (umtx_owner(&(*m)->m_lock) == curthread->tid) {
+		} else if ((*m)->m_owner == curthread) {
 			ret = mutex_self_lock(curthread, *m, abstime);
 		} else {
 			if (abstime == NULL) {
-				UMTX_LOCK(&(*m)->m_lock, curthread->tid);
+				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
 				ret = 0;
 			} else {
-				ret = umtx_timedlock(&(*m)->m_lock,
-					 curthread->tid, abstime);
+				clock_gettime(CLOCK_REALTIME, &ts);
+				TIMESPEC_SUB(&ts2, abstime, &ts);
+				ret = THR_UMTX_TIMEDLOCK(curthread,
+					&(*m)->m_lock, &ts2);
 				/*
 				 * Timed out wait is not restarted if
 				 * it was interrupted, not worth to do it.
@@ -615,14 +619,12 @@
 				/* Unlock the mutex structure: */
 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
 
-				if (abstime) {
-					ret = umtx_timedwait((struct umtx *)&curthread->cycle,
-						cycle, abstime);
-					if (ret != ETIMEDOUT)
-						ret = 0;
-				} else {
-					umtx_wait((struct umtx *)&curthread->cycle, cycle);
-				}
+				clock_gettime(CLOCK_REALTIME, &ts);
+				TIMESPEC_SUB(&ts2, abstime, &ts);
+				ret = _thr_umtx_wait(&curthread->cycle, cycle,
+					 &ts2);
+				if (ret == EINTR)
+					ret = 0;
 
 				if (THR_IN_MUTEXQ(curthread)) {
 					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
@@ -700,14 +702,12 @@
 				/* Unlock the mutex structure: */
 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
 
-				if (abstime) {
-					ret = umtx_timedwait((struct umtx *)&curthread->cycle,
-						cycle, abstime);
-					if (ret != ETIMEDOUT)
-						ret = 0;
-				} else {
-					umtx_wait((struct umtx *)&curthread->cycle, cycle);
-				}
+				clock_gettime(CLOCK_REALTIME, &ts);
+				TIMESPEC_SUB(&ts2, abstime, &ts);
+				ret = _thr_umtx_wait(&curthread->cycle, cycle,
+					&ts2);
+				if (ret == EINTR)
+					ret = 0;
 
 				curthread->data.mutex = NULL;
 				if (THR_IN_MUTEXQ(curthread)) {
@@ -859,7 +859,7 @@
 	return (ret);
 }
 
-static inline int
+static int
 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
 {
 	int	ret;
@@ -888,7 +888,7 @@
 	return (ret);
 }
 
-static inline int
+static int
 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
 	const struct timespec *abstime)
 {
@@ -905,8 +905,8 @@
 			ret = ETIMEDOUT;
 		} else {
 			/*
-			 * POSIX specifies that mutexes should return EDEADLK if a
-			 * recursive lock is detected.
+			 * POSIX specifies that mutexes should return
+			 * EDEADLK if a recursive lock is detected.
 			 */
 			ret = EDEADLK; 
 		}
@@ -969,8 +969,7 @@
 			 * Check if the running thread is not the owner of the
 			 * mutex:
 			 */
-			if (__predict_false(umtx_owner(&(*m)->m_lock) !=
-				curthread->tid)) {
+			if (__predict_false((*m)->m_owner != curthread)) {
 				ret = EPERM;
 			} else if (__predict_false(
 				  (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
@@ -986,11 +985,6 @@
 				 */
 				(*m)->m_count = 0;
 				(*m)->m_owner = NULL;
-				/*
-				 * XXX there should be a separated list
-				 * for owned mutex, separated it from
-				 * priority mutex list
-				 */
 				/* Remove the mutex from the threads queue. */
 				MUTEX_ASSERT_IS_OWNED(*m);
 				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
@@ -999,9 +993,9 @@
 					(*m)->m_refcount++;
 				/*
 				 * Hand off the mutex to the next waiting
-				 * thread, XXX ignore return value.
+				 * thread.
 				 */
-				umtx_unlock(&(*m)->m_lock, curthread->tid);
+				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
 			}
 			return (ret);
 		}
@@ -1608,7 +1602,7 @@
 
 		/* Make the thread runnable and unlock the scheduling queue: */
 		pthread->cycle++;
-		umtx_wake((struct umtx *)&pthread->cycle, 1);
+		_thr_umtx_wake(&pthread->cycle, 1);
 
 		THR_THREAD_UNLOCK(curthread, pthread);
 		if (mutex->m_owner == pthread)
@@ -1625,11 +1619,12 @@
 	return (tid);
 }
 
+#if 0
 /*
  * Dequeue a waiting thread from the head of a mutex queue in descending
  * priority order.
  */
-static inline pthread_t
+static pthread_t
 mutex_queue_deq(struct pthread_mutex *mutex)
 {
 	pthread_t pthread;
@@ -1641,11 +1636,12 @@
 
 	return (pthread);
 }
+#endif
 
 /*
  * Remove a waiting thread from a mutex queue in descending priority order.
  */
-static inline void
+static void
 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
 {
 	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
@@ -1657,7 +1653,7 @@
 /*
  * Enqueue a waiting thread to a queue in descending priority order.
  */
-static inline void
+static void
 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
 {
 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
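
Because m_lock is now a bare umtx_t word rather than a struct umtx, the lock
itself no longer records its owner (the old umtx_owner()/u_owner accesses are
gone), so the recursion and EPERM checks compare the mutex's m_owner field
against curthread instead.  The fragment below is an illustrative condensation
of the PTHREAD_PRIO_NONE fast path after this change, not code from the tree;
priority-protocol handling, timeouts and error paths are omitted.

/*
 * Illustrative sketch only: simple-mutex lock after this change.
 */
static int
simple_mutex_lock(struct pthread *curthread, struct pthread_mutex *m)
{
	int ret;

	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		/* Got the lock word; record ownership in the mutex itself. */
		m->m_owner = curthread;
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		/* Already ours: recursive/self-lock handling. */
		ret = mutex_self_lock(curthread, m, NULL);
	} else {
		/* Contended: block in the kernel until the word is released. */
		THR_UMTX_LOCK(curthread, &m->m_lock);
		m->m_owner = curthread;
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		ret = 0;
	}
	return (ret);
}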

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_once.c#4 (text+ko) ====


==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_private.h#19 (text+ko) ====

@@ -40,22 +40,23 @@
 /*
  * Include files.
  */
-#include <sys/limits.h>
 #include <sys/types.h>
 #include <sys/time.h>
 #include <sys/cdefs.h>
 #include <sys/queue.h>
 #include <machine/atomic.h>
 #include <errno.h>
+#include <limits.h>
 #include <signal.h>
 #include <stdio.h>
 #include <sched.h>
 #include <unistd.h>
 #include <ucontext.h>
 #include <sys/thr.h>
-#include <sys/umtx.h>
+#include <pthread.h>
 
 #include "pthread_md.h"
+#include "thr_umtx.h"
 
 /*
  * Evaluate the storage class specifier.
@@ -113,7 +114,7 @@
 	/*
 	 * Lock for accesses to this structure.
 	 */
-	struct umtx			m_lock;
+	volatile umtx_t			m_lock;
 	enum pthread_mutextype		m_type;
 	int				m_protocol;
 	TAILQ_HEAD(mutex_head, pthread)	m_queue;
@@ -163,22 +164,24 @@
 	/*
 	 * Lock for accesses to this structure.
 	 */
-	struct umtx	c_lock;
-	volatile long	c_seqno;
-	volatile long	c_waiters;
-	volatile long	c_wakeups;
-	long		c_flags;
+	volatile umtx_t	c_lock;
+	volatile umtx_t	c_seqno;
+	volatile int	c_waiters;
+	volatile int	c_wakeups;
+	int		c_pshared;
+	int		c_clockid;
 };
 
 struct pthread_cond_attr {
-	long		c_flags;
+	int		c_pshared;
+	int		c_clockid;
 };
 
 struct pthread_barrier {
-	struct umtx	b_lock;
-	long		b_cycle;
-	int		b_count;
-	int		b_waiters;
+	volatile umtx_t	b_lock;
+	volatile umtx_t	b_cycle;
+	volatile int	b_count;
+	volatile int	b_waiters;
 };
 
 struct pthread_barrierattr {
@@ -186,7 +189,7 @@
 };
 
 struct pthread_spinlock {
-	struct	umtx	s_lock;
+	volatile umtx_t	s_lock;
 };
 
 /*
@@ -342,7 +345,7 @@
 	/*
 	 * Lock for accesses to this thread structure.
 	 */
-	struct umtx		lock;
+	umtx_t			lock;
 
 	/* Thread is terminated in kernel, written by kernel. */
 	long			terminated;
@@ -351,7 +354,7 @@
 	long			tid;
 
 	/* Internal condition variable cycle number. */
-	long			cycle;
+	umtx_t			cycle;
 
 	/* How many low level locks the thread held. */
 	int			locklevel;
@@ -400,7 +403,7 @@
 	sigset_t		sigmask;
 
 	/* Thread state: */
-	long			state;
+	umtx_t			state;
 
 	/*
 	 * Error variable used instead of errno. The function __error()
@@ -492,41 +495,32 @@
 	struct pthread_cleanup *cleanup;
 };
 
-#define UMTX_LOCK(m, tid)			\
-	do {					\
-		while (umtx_lock(m, tid))	\
-			;			\
-	} while (0)
-
 #define THR_UMTX_TRYLOCK(thrd, lck)			\
-	umtx_trylock((struct umtx *)(lck), (thrd)->tid)
+	_thr_umtx_trylock((lck), (thrd)->tid)
 
 #define	THR_UMTX_LOCK(thrd, lck)			\
-	UMTX_LOCK((struct umtx *)(lck), (thrd)->tid)	\
+	_thr_umtx_lock((lck), (thrd)->tid)
+
+#define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
+	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))
 
 #define	THR_UMTX_UNLOCK(thrd, lck)			\
-	umtx_unlock((struct umtx *)(lck), (thrd)->tid)
+	_thr_umtx_unlock((lck), (thrd)->tid)
 
-#define THR_UMTX_TIMEDLOCK(thrd, lck, abstime)		\
-	umtx_timedlock((struct umtx *)(lck), (thrd)->tid, (abstime))
-
-#define	THR_UMTX_OWNED(thrd, lck)			\
-	(umtx_owner((struct umtx *)lck) == (thrd)->tid)
-
-#define	THR_LOCK_ACQUIRE(thrd, lck)				\
-do {								\
-	(thrd)->locklevel++;					\
-	UMTX_LOCK((struct umtx *)(lck), (thrd)->tid);		\
+#define	THR_LOCK_ACQUIRE(thrd, lck)			\
+do {							\
+	(thrd)->locklevel++;				\
+	_thr_umtx_lock(lck, (thrd)->tid);		\
 } while (0)
 
-#define	THR_LOCK_RELEASE(thrd, lck)				\
-do {								\
-	if ((thrd)->locklevel > 0) {				\
-		umtx_unlock((struct umtx *)(lck), (thrd)->tid);	\
-		(thrd)->locklevel--;				\
-	} else { 						\
-		_thr_assert_lock_level();			\
-	}							\
+#define	THR_LOCK_RELEASE(thrd, lck)			\
+do {							\
+	if ((thrd)->locklevel > 0) {			\
+		_thr_umtx_unlock((lck), (thrd)->tid);	\
+		(thrd)->locklevel--;			\
+	} else { 					\
+		_thr_assert_lock_level();		\
+	}						\
 } while (0)
 
 #define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
@@ -577,7 +571,7 @@
 	}							\
 } while (0)
 
-#define GC_NEEDED()	(atomic_load_acq_int(&_gc_count) >= 5)
+#define GC_NEEDED()	(_gc_count >= 5)
 
 #define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
 
@@ -604,26 +598,39 @@
 SCLASS int	_thread_active_threads  SCLASS_PRESET(1);
 
 SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
-SCLASS struct umtx		_thr_atfork_lock;
+SCLASS umtx_t	_thr_atfork_lock;
 
 /* Default thread attributes: */
 SCLASS struct pthread_attr _pthread_attr_default
     SCLASS_PRESET({
-	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
-	THR_CREATE_RUNNING, 0, NULL,
-	NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
+	.sched_policy = SCHED_RR,
+	.sched_inherit = 0,
+	.sched_interval = TIMESLICE_USEC,
+	.prio = THR_DEFAULT_PRIORITY,
+	.suspend = THR_CREATE_RUNNING,
+	.flags = 0,
+	.arg_attr = NULL,
+	.cleanup_attr = NULL,
+	.stackaddr_attr = NULL,
+	.stacksize_attr = THR_STACK_DEFAULT,
+	.guardsize_attr = 0
     });
 
 /* Default mutex attributes: */
 SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
-    SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
+    SCLASS_PRESET({
+	.m_type = PTHREAD_MUTEX_DEFAULT,
+	.m_protocol = PTHREAD_PRIO_NONE,
+	.m_ceiling = 0,
+	.m_flags = 0
+    });
 
 /* Default condition variable attributes: */
 SCLASS struct pthread_cond_attr _pthread_condattr_default
-    SCLASS_PRESET({PTHREAD_PROCESS_PRIVATE});
-
-/* Array of signal actions for this process: */
-SCLASS struct sigaction	_thread_sigact[_SIG_MAXSIG];
+    SCLASS_PRESET({
+	.c_pshared = PTHREAD_PROCESS_PRIVATE,
+	.c_clockid = CLOCK_REALTIME
+    });
 
 SCLASS pid_t		_thr_pid		SCLASS_PRESET(0);
 SCLASS int		_thr_guard_default;
@@ -631,11 +638,11 @@
 /* Garbage thread count. */
 SCLASS int              _gc_count               SCLASS_PRESET(0);
 
-SCLASS struct umtx	_mutex_static_lock;
-SCLASS struct umtx	_cond_static_lock;
-SCLASS struct umtx	_rwlock_static_lock;
-SCLASS struct umtx	_keytable_lock;
-SCLASS struct umtx	_thr_list_lock;
+SCLASS umtx_t		_mutex_static_lock;
+SCLASS umtx_t		_cond_static_lock;
+SCLASS umtx_t		_rwlock_static_lock;
+SCLASS umtx_t		_keytable_lock;
+SCLASS umtx_t		_thr_list_lock;
 
 /* Undefine the storage class and preset specifiers: */
 #undef  SCLASS
@@ -654,6 +661,13 @@
 void	_mutex_unlock_private(struct pthread *);
 void	_libpthread_init(struct pthread *);
 void	*_pthread_getspecific(pthread_key_t);
+int	_pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *);
+int	_pthread_cond_destroy(pthread_cond_t *);
+int	_pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int	_pthread_cond_timedwait(pthread_cond_t *, pthread_mutex_t *,
+	    const struct timespec *);
+int	_pthread_cond_signal(pthread_cond_t *);
+int	_pthread_cond_broadcast(pthread_cond_t *);
 int	_pthread_key_create(pthread_key_t *, void (*) (void *));
 int	_pthread_key_delete(pthread_key_t);
 int	_pthread_mutex_destroy(pthread_mutex_t *);
@@ -695,6 +709,7 @@
 void	_thr_signal_unblock(struct pthread *);
 void	_thr_signal_init(void);
 void	_thr_signal_deinit(void);
+int	_thr_send_sig(struct pthread *, int sig);
 void	_thr_list_init();
 void	_thr_hash_add(struct pthread *);
 void	_thr_hash_remove(struct pthread *);
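
The THR_UMTX_* and THR_LOCK_* macros above now forward directly to the wrapper
functions while keeping the per-thread locklevel bookkeeping.  A hypothetical
caller (example_lock and example() are illustrations, not code from the tree)
would use them like this:

static umtx_t example_lock;	/* assume _thr_umtx_init(&example_lock) ran once at startup */

static void
example(void)
{
	struct pthread *curthread = _get_curthread();

	THR_LOCK_ACQUIRE(curthread, &example_lock);	/* locklevel++, then _thr_umtx_lock() */
	/* ... touch data protected by example_lock ... */
	THR_LOCK_RELEASE(curthread, &example_lock);	/* _thr_umtx_unlock(), then locklevel-- */
}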

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_pspinlock.c#9 (text+ko) ====

@@ -50,7 +50,7 @@
 	else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
 		ret = ENOMEM;
 	else {
-		umtx_init(&lck->s_lock);
+		_thr_umtx_init(&lck->s_lock);
 		*lock = lck;
 		ret = 0;
 	}
@@ -77,30 +77,30 @@
 int
 _pthread_spin_trylock(pthread_spinlock_t *lock)
 {
+	struct pthread *curthread = _get_curthread();
 	struct pthread_spinlock	*lck;
-	struct pthread *self = _pthread_self();
 	int ret;
 
 	if (lock == NULL || (lck = *lock) == NULL)
 		ret = EINVAL;
 	else
-		ret = umtx_trylock(&lck->s_lock, self->tid);
+		ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock);
 	return (ret);
 }
 
 int
 _pthread_spin_lock(pthread_spinlock_t *lock)
 {
+	struct pthread *curthread = _get_curthread();
 	struct pthread_spinlock	*lck;
-	struct pthread *self = _pthread_self();
 	int ret, count;
 
 	if (lock == NULL || (lck = *lock) == NULL)
 		ret = EINVAL;
 	else {
 		count = SPIN_COUNT;
-		while ((ret = umtx_trylock(&lck->s_lock, self->tid)) != 0) {
-			while (*(volatile long *)&lck->s_lock.u_owner) {
+		while ((ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
+			while (lck->s_lock) {
 #ifdef __i386__
 				/* tell cpu we are spinning */
 				__asm __volatile("pause");
@@ -120,14 +120,14 @@
 int
 _pthread_spin_unlock(pthread_spinlock_t *lock)
 {
+	struct pthread *curthread = _get_curthread();
 	struct pthread_spinlock	*lck;
-	struct pthread *self = _pthread_self();
 	int ret;
 
 	if (lock == NULL || (lck = *lock) == NULL)
 		ret = EINVAL;
 	else {
-		ret = umtx_unlock(&lck->s_lock, self->tid);
+		ret = THR_UMTX_UNLOCK(curthread, &lck->s_lock);
 	}
 	return (ret);
 }

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_resume_np.c#5 (text+ko) ====

@@ -38,7 +38,7 @@
 __weak_reference(_pthread_resume_np, pthread_resume_np);
 __weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
 
-static void inline resume_common(struct pthread *thread);
+static void resume_common(struct pthread *thread);
 
 /* Resume a thread: */
 int
@@ -79,12 +79,12 @@
 	THREAD_LIST_UNLOCK(curthread);
 }
 
-static void inline
+static void
 resume_common(struct pthread *thread)
 {
 	/* Clear the suspend flag: */

>>> TRUNCATED FOR MAIL (1000 lines) <<<


