Date: Fri, 12 Nov 2010 05:22:27 +0000 (UTC) From: David Xu <davidxu@FreeBSD.org> To: src-committers@freebsd.org, svn-src-user@freebsd.org Subject: svn commit: r215165 - in user/davidxu/libthr: include lib/libthr lib/libthr/thread sys/kern sys/sys Message-ID: <201011120522.oAC5MRub096966@svn.freebsd.org>
next in thread | raw e-mail | index | archive | help
Author: davidxu Date: Fri Nov 12 05:22:27 2010 New Revision: 215165 URL: http://svn.freebsd.org/changeset/base/215165 Log: - Move pthread types declarations into sys/sys/_pthreadtypes.h. - Rework robust mutex to only use lock-word in mutex. - Move priority ceiling into flag fields. These two changes make sizeof the pthread_mutex 24 bytes; I may still tweak it to reserve some space for the future, but I will not let it exceed 32 bytes, which is the cache line size of PIII. Modified: user/davidxu/libthr/include/pthread.h user/davidxu/libthr/lib/libthr/Makefile user/davidxu/libthr/lib/libthr/thread/thr_cond.c user/davidxu/libthr/lib/libthr/thread/thr_mutex.c user/davidxu/libthr/lib/libthr/thread/thr_rwlock.c user/davidxu/libthr/lib/libthr/thread/thr_umtx.c user/davidxu/libthr/lib/libthr/thread/thr_umtx.h user/davidxu/libthr/sys/kern/kern_umtx.c user/davidxu/libthr/sys/sys/_pthreadtypes.h user/davidxu/libthr/sys/sys/_umtx.h user/davidxu/libthr/sys/sys/umtx.h Modified: user/davidxu/libthr/include/pthread.h ============================================================================== --- user/davidxu/libthr/include/pthread.h Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/include/pthread.h Fri Nov 12 05:22:27 2010 (r215165) @@ -41,7 +41,7 @@ #include <sys/cdefs.h> #include <sys/_pthreadtypes.h> #include <machine/_limits.h> -#include <machine/_types.h> +#include <machine/endian.h> #include <sys/_sigset.h> #include <sched.h> #include <time.h> @@ -97,15 +97,23 @@ /* * Static initialization values. 
*/ -#define PTHREAD_MUTEX_INITIALIZER \ - {PTHREAD_MUTEX_DEFAULT, 0, 0, NULL, 0, 0x0010, {0, 0}, 0, 0} +#if BYTE_ORDER == LITTLE_ENDIAN +#define PTHREAD_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_DEFAULT, 0, 0, {NULL}, 0, 0x0020, \ + {0, 0}} +#else +#define PTHREAD_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_DEFAULT, 0, 0, {NULL}, 0, {0, 0}, \ + 0x0020} +#endif #define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \ - {PTHREAD_MUTEX_DEFAULT, 2000, 0, NULL, 0, 0x0010, {0, 0}, 0, 0} + {PTHREAD_MUTEX_DEFAULT, 2000, 0, {NULL}, 0, 0x0010, {0, 0}} #define PTHREAD_COND_INITIALIZER \ {0, 0, 0, 0, 0, 0, 0, 0, 0, CLOCK_REALTIME} -#define PTHREAD_RWLOCK_INITIALIZER { .__owner.__ownertd = 0, 0, 0, 0, 0} + +#define PTHREAD_RWLOCK_INITIALIZER { {NULL}, 0, 0, 0, 0} /* * Default attribute arguments (draft 4, deprecated). @@ -156,57 +164,6 @@ struct _pthread_cleanup_info { __uintptr_t __pthread_cleanup_pad[8]; }; -struct pthread_mutex { - __int16_t __flags; - __int16_t __spinloops; - __int32_t __recurse; - struct pthread *__ownertd; - /* kernel umtx part */ - volatile __uint32_t __lockword; - __uint32_t __lockflags; - __uint32_t __ceilings[2]; - __uint8_t __robstate; - __uint8_t __pad1; -}; - -struct pthread_cond { - __uint32_t __lock; - int __waiters; - int __signals; - __uint32_t __seq; - __uint64_t __broadcast_seq; - int __refcount; - int __destroying; - /* kernel part */ - __uint32_t __kern_has_waiters; - __uint32_t __flags; - __uint32_t __clock_id; -}; - -struct pthread_rwlock { - union { - __uint32_t __ownertid; - struct pthread *__ownertd; - char __pad[8]; - } __owner; - __uint32_t __state; - __uint32_t __flags; - __uint32_t __blocked_readers; - __uint32_t __blocked_writers; -}; - -struct pthread_barrier { - pthread_mutex_t __lock; - pthread_cond_t __cond; - __uint64_t __cycle; - __uint32_t __count; - __uint32_t __waiters; -}; - -struct pthread_spinlock { - __uint32_t __lock; -}; - /* * Thread function prototype definitions: */ Modified: user/davidxu/libthr/lib/libthr/Makefile 
============================================================================== --- user/davidxu/libthr/lib/libthr/Makefile Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/Makefile Fri Nov 12 05:22:27 2010 (r215165) @@ -40,7 +40,7 @@ MAN= libthr.3 # enable extra internal consistancy checks CFLAGS+=-D_PTHREADS_INVARIANTS -CFLAGS+=-g -O0 +#CFLAGS+=-g -O0 PRECIOUSLIB= Modified: user/davidxu/libthr/lib/libthr/thread/thr_cond.c ============================================================================== --- user/davidxu/libthr/lib/libthr/thread/thr_cond.c Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/thread/thr_cond.c Fri Nov 12 05:22:27 2010 (r215165) @@ -170,7 +170,7 @@ cond_wait_kernel(struct pthread_cond *cv error2 = _mutex_cv_lock(mp, recurse); if (error2 == 0 && cancel) _thr_testcancel(curthread); - if (error2 == EINTR) + if (error == EINTR) error = 0; } else { /* We know that it didn't unlock the mutex. */ @@ -229,17 +229,14 @@ cond_wait_user(struct pthread_cond *cvp, _thr_umtx_lock_spin(&cvp->__lock); if (cvp->__broadcast_seq != bseq) { - cvp->__refcount--; error = 0; break; } if (cvp->__signals > 0) { - cvp->__refcount--; cvp->__signals--; error = 0; break; } else if (error == ETIMEDOUT) { - cvp->__refcount--; cvp->__waiters--; break; } else if (cancel && SHOULD_CANCEL(curthread) && @@ -255,6 +252,7 @@ cond_wait_user(struct pthread_cond *cvp, _pthread_exit(PTHREAD_CANCELED); } } + cvp->__refcount--; if (cvp->__destroying && cvp->__refcount == 0) { cvp->__destroying = 2; _thr_umtx_wake(&cvp->__destroying, INT_MAX, CV_PSHARED(cvp)); @@ -280,14 +278,14 @@ cond_wait_common(struct pthread_cond *cv /* * If the thread is real-time thread or if it holds priority mutex, - * it should use kernel based cv, because the cv internal lock - * does not protect priority, it can cause priority inversion. - * Note that if it is robust type of mutex, we should not use - * the internal lock too, because it is not robust. 
+ * it should use kernel based cv, because the user cv's internal lock + * does not protect priority inversion, note that if mutex is robust + * type of mutex, we should not use user cv too, because the internal + * lock is not robust. */ if (curthread->attr.sched_policy != SCHED_OTHER || curthread->priority_mutex_count != 0 || - (mp->__lockflags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT| + (mp->__lockflags & (UMUTEX_PRIO_PROTECT2|UMUTEX_PRIO_INHERIT| UMUTEX_ROBUST)) != 0) return cond_wait_kernel(cvp, mp, abstime, cancel); else @@ -405,7 +403,7 @@ cond_init_old(pthread_cond_old_t *cond, int error = 0; if ((cvp = (struct pthread_cond *) - calloc(1, sizeof(struct pthread_cond))) == NULL) { + malloc(sizeof(struct pthread_cond))) == NULL) { error = ENOMEM; } else { error = cond_init(cvp, cond_attr); Modified: user/davidxu/libthr/lib/libthr/thread/thr_mutex.c ============================================================================== --- user/davidxu/libthr/lib/libthr/thread/thr_mutex.c Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/thread/thr_mutex.c Fri Nov 12 05:22:27 2010 (r215165) @@ -61,7 +61,7 @@ int _pthread_mutex_setyieldloops_np(pthr int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); -static int mutex_self_trylock(struct pthread_mutex * ); +static int mutex_self_trylock(struct pthread_mutex *); static int mutex_self_lock(struct pthread_mutex *, const struct timespec *abstime); static int mutex_unlock_common(struct pthread_mutex *); @@ -112,6 +112,10 @@ mutex_init(struct pthread_mutex *mp, { const struct pthread_mutex_attr *attr; + /* Must align at integer boundary */ + if (((uintptr_t)mp) & 0x03) + return (EINVAL); + if (mutex_attr == NULL) { attr = &_pthread_mutexattr_default; } else { @@ -125,7 +129,7 @@ mutex_init(struct pthread_mutex *mp, } memset(mp, 0, sizeof(*mp)); mp->__flags = attr->m_type; - mp->__ownertd = NULL; + 
mp->__ownerdata.__ownertd = NULL; mp->__recurse = 0; mp->__spinloops = 0; switch(attr->m_protocol) { @@ -139,8 +143,8 @@ mutex_init(struct pthread_mutex *mp, mp->__lockflags = UMUTEX_PRIO_INHERIT; break; case PTHREAD_PRIO_PROTECT: - mp->__lockword = UMUTEX_CONTESTED; - mp->__lockflags = UMUTEX_PRIO_PROTECT; + mp->__lockword = UMUTEX_UNOWNED; + mp->__lockflags = UMUTEX_PRIO_PROTECT2; if (attr->m_pshared == 0) mp->__lockflags |= UMUTEX_SIMPLE; mp->__ceilings[0] = attr->m_ceiling; @@ -178,10 +182,10 @@ _mutex_fork(struct pthread *curthread) } int -_pthread_mutex_destroy(pthread_mutex_t *mutex) +_pthread_mutex_destroy(pthread_mutex_t *mp) { - int error = 0; - return (error); + memset(mp, 0, sizeof(*mp)); + return (0); } static int @@ -191,9 +195,7 @@ mutex_trylock_common(struct pthread_mute uint32_t id; int error; - if (mp->__lockflags & UMUTEX_ROBUST) - abort(); - if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT | + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT)) == 0) { if (mp->__lockflags & UMUTEX_SIMPLE) id = UMUTEX_SIMPLE_OWNER; @@ -201,19 +203,19 @@ mutex_trylock_common(struct pthread_mute id = TID(curthread); if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id)) { - mp->__ownertd = curthread; + mp->__ownerdata.__ownertd = curthread; return (0); } if (mp->__lockword == UMUTEX_CONTESTED) { if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_CONTESTED, id|UMUTEX_CONTESTED)) { - mp->__ownertd = curthread; + mp->__ownerdata.__ownertd = curthread; return (0); } } - } else if (mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT)) { + } else if (mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2)) { if (mp->__lockflags & UMUTEX_SIMPLE) { - if (mp->__ownertd == curthread) + if (mp->__ownerdata.__ownertd == curthread) return mutex_self_trylock(mp); } else { if ((mp->__lockword & UMUTEX_OWNER_MASK) == @@ -228,8 +230,7 @@ mutex_trylock_common(struct pthread_mute return (error); } else if (mp->__lockflags & 
UMUTEX_PRIO_INHERIT) { id = TID(curthread); - if (atomic_cmpset_acq_32(&mp->__lockflags, UMUTEX_UNOWNED, id - )) { + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id)){ enqueue_mutex(curthread, mp); return (0); } @@ -237,7 +238,6 @@ mutex_trylock_common(struct pthread_mute return mutex_self_trylock(mp); return (EBUSY); } - return (EINVAL); } @@ -273,7 +273,7 @@ mutex_lock_sleep(struct pthread_mutex *m */ if (__predict_false( (mp->__lockflags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | + (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) != 0)) goto sleep_in_kernel; @@ -318,23 +318,21 @@ _mutex_lock_common(struct pthread_mutex struct pthread *curthread = _get_curthread(); uint32_t id; - if (mp->__lockflags & UMUTEX_ROBUST) - abort(); if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) id = UMUTEX_SIMPLE_OWNER; else id = TID(curthread); - if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT | + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT)) == 0) { if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id)) { - mp->__ownertd = curthread; + mp->__ownerdata.__ownertd = curthread; return (0); } if (mp->__lockword == UMUTEX_CONTESTED) { if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_CONTESTED, id|UMUTEX_CONTESTED)) { - mp->__ownertd = curthread; + mp->__ownerdata.__ownertd = curthread; return (0); } } @@ -356,7 +354,7 @@ _mutex_lock_common(struct pthread_mutex return (EINVAL); if (mp->__lockflags & UMUTEX_SIMPLE) { - if (mp->__ownertd == curthread) + if (mp->__ownerdata.__ownertd == curthread) return mutex_self_lock(mp, abstime); } else { if ((mp->__lockword & UMUTEX_OWNER_MASK) == TID(curthread)) @@ -511,7 +509,7 @@ _mutex_owned(struct pthread *curthread, * Check if the running thread is not the owner of the mutex. 
*/ if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) { - if (__predict_false(mp->__ownertd != curthread)) + if (__predict_false(mp->__ownerdata.__ownertd != curthread)) return (EPERM); } else { if ((mp->__lockword & UMUTEX_OWNER_MASK) != TID(curthread)) @@ -543,7 +541,7 @@ _mutex_unlock_common(struct pthread_mute else id = TID(curthread); - if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT | + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT)) == 0) { if (atomic_cmpset_acq_32(&mp->__lockword, id, UMUTEX_UNOWNED)) { @@ -638,7 +636,7 @@ _pthread_mutex_getprioceiling(pthread_mu { int error; - if ((mp->__lockflags & UMUTEX_PRIO_PROTECT) == 0) + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) error = EINVAL; else { *prioceiling = mp->__ceilings[0]; @@ -656,14 +654,15 @@ _pthread_mutex_setprioceiling(pthread_mu struct mutex_link *ml, *ml1, *ml2; int error; - if ((mp->__lockflags & UMUTEX_PRIO_PROTECT) == 0) + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) return (EINVAL); error = __thr_umutex_set_ceiling((struct umutex *)&mp->__lockword, ceiling, old_ceiling); if (error != 0) return (error); - if (((mp->__lockflags & UMUTEX_SIMPLE) && (mp->__ownertd == curthread)) || + if (((mp->__lockflags & UMUTEX_SIMPLE) && + (mp->__ownerdata.__ownertd == curthread)) || (mp->__lockword & UMUTEX_OWNER_MASK) == TID(curthread)) { TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) { if (ml->mutexp == mp) @@ -705,6 +704,19 @@ _pthread_mutex_setspinloops_np(pthread_m } int +_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) +{ + *count = 0; + return (0); +} + +int +_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) +{ + return (0); +} + +int _pthread_mutex_isowned_np(pthread_mutex_t *mp) { return (_mutex_owned(_get_curthread(), mp) == 0); @@ -716,7 +728,7 @@ _pthread_mutex_consistent(pthread_mutex_ if (_mutex_owned(_get_curthread(), mp) == 0) { if (mp->__lockflags & UMUTEX_ROBUST) { - mp->__robstate = 
UMUTEX_ROBST_NORMAL; + atomic_clear_32(&mp->__lockword, UMUTEX_OWNER_DEAD); mp->__recurse = 0; return (0); } @@ -791,13 +803,13 @@ enqueue_mutex(struct pthread *curthread, struct mutex_link *ml; if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) - mp->__ownertd = curthread; + mp->__ownerdata.__ownertd = curthread; /* * For PP mutex, we should restore previous priority after a PP * mutex is unlocked, so we should remember every PP mutex. */ - if ((mp->__lockflags & UMUTEX_PRIO_PROTECT) != 0) { + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) != 0) { curthread->priority_mutex_count++; ml = _thr_mutex_link_alloc(); ml->mutexp = mp; @@ -825,9 +837,9 @@ dequeue_mutex(struct pthread *curthread, struct mutex_link *ml; if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) - mp->__ownertd = NULL; + mp->__ownerdata.__ownertd = NULL; - if ((mp->__lockflags & UMUTEX_PRIO_PROTECT) != 0) { + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) != 0) { curthread->priority_mutex_count--; TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) { if (ml->mutexp == mp) { @@ -868,14 +880,13 @@ _mutex_owned_old(struct pthread *curthre static int mutex_init_old(pthread_mutex_old_t *mutex, - const struct pthread_mutex_attr *mutex_attr, - void *(calloc_cb)(size_t, size_t)) + const struct pthread_mutex_attr *mutex_attr) { struct pthread_mutex *mp; int error; if ((mp = (struct pthread_mutex *) - calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) { + malloc(sizeof(struct pthread_mutex))) == NULL) { return (ENOMEM); } error = mutex_init(mp, mutex_attr); @@ -905,11 +916,10 @@ init_static(struct pthread *thread, pthr THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); if (*mutex == THR_MUTEX_INITIALIZER) { - error = mutex_init_old(mutex, &_pthread_mutexattr_default, - calloc); + error = mutex_init_old(mutex, &_pthread_mutexattr_default); } else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) { - error = mutex_init_old(mutex, &_pthread_mutexattr_adaptive_default, - calloc); + error = mutex_init_old(mutex, + 
&_pthread_mutexattr_adaptive_default); } else error = 0; @@ -941,7 +951,7 @@ int _pthread_mutex_init_1_0(pthread_mutex_old_t *mutex, const pthread_mutexattr_t *mutex_attr) { - return mutex_init_old(mutex, mutex_attr ? *mutex_attr : NULL, calloc); + return mutex_init_old(mutex, mutex_attr ? *mutex_attr : NULL); } int @@ -1065,7 +1075,7 @@ _pthread_mutex_getprioceiling_1_0(pthrea mp = *mutex; if ((mp <= THR_MUTEX_DESTROYED) || - (mp->__lockflags & UMUTEX_PRIO_PROTECT) == 0) + (mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) error = EINVAL; else { *prioceiling = mp->__ceilings[0]; @@ -1083,7 +1093,7 @@ _pthread_mutex_setprioceiling_1_0(pthrea mp = *mutex; if ((mp <= THR_MUTEX_DESTROYED) || - (mp->__lockflags & UMUTEX_PRIO_PROTECT) == 0) + (mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) return (EINVAL); return _pthread_mutex_setprioceiling(mp, ceiling, old_ceiling); } @@ -1110,10 +1120,12 @@ _pthread_mutex_init_calloc_cb(pthread_mu FB10_COMPAT(_pthread_mutex_destroy_1_0, pthread_mutex_destroy); FB10_COMPAT(_pthread_mutex_getprioceiling_1_0, pthread_mutex_getprioceiling); FB10_COMPAT(_pthread_mutex_getspinloops_np_1_0, pthread_mutex_getspinloops_np); +FB10_COMPAT(_pthread_mutex_getyieldloops_np_1_0, pthread_mutex_getyieldloops_np); FB10_COMPAT(_pthread_mutex_init_1_0, pthread_mutex_init); FB10_COMPAT(_pthread_mutex_lock_1_0, pthread_mutex_lock); -FB10_COMPAT(_pthread_mutex_setspinloops_np_1_0, pthread_mutex_setspinloops_np); FB10_COMPAT(_pthread_mutex_setprioceiling_1_0, pthread_mutex_setprioceiling); +FB10_COMPAT(_pthread_mutex_setspinloops_np_1_0, pthread_mutex_setspinloops_np); +FB10_COMPAT(_pthread_mutex_setyieldloops_np_1_0, pthread_mutex_setyieldloops_np); FB10_COMPAT(_pthread_mutex_timedlock_1_0, pthread_mutex_timedlock); FB10_COMPAT(_pthread_mutex_trylock_1_0, pthread_mutex_trylock); FB10_COMPAT(_pthread_mutex_unlock_1_0, pthread_mutex_unlock); Modified: user/davidxu/libthr/lib/libthr/thread/thr_rwlock.c 
============================================================================== --- user/davidxu/libthr/lib/libthr/thread/thr_rwlock.c Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/thread/thr_rwlock.c Fri Nov 12 05:22:27 2010 (r215165) @@ -218,9 +218,9 @@ static void rwlock_setowner(struct pthread_rwlock *rwlp, struct pthread *td) { if (!RWL_PSHARED(rwlp)) - rwlp->__owner.__ownertd = td; + rwlp->__ownerdata.__ownertd = td; else - rwlp->__owner.__ownertid = TID(td); + rwlp->__ownerdata.__ownertid = TID(td); } int @@ -310,11 +310,11 @@ _pthread_rwlock_unlock(pthread_rwlock_t state = rwlp->__state; if (state & URWLOCK_WRITE_OWNER) { if (RWL_PSHARED(rwlp) && - rwlp->__owner.__ownertid == TID(curthread)) { - rwlp->__owner.__ownertid = 0; + rwlp->__ownerdata.__ownertid == TID(curthread)) { + rwlp->__ownerdata.__ownertid = 0; } else if (!RWL_PSHARED(rwlp) && - rwlp->__owner.__ownertd == curthread) { - rwlp->__owner.__ownertd = NULL; + rwlp->__ownerdata.__ownertd == curthread) { + rwlp->__ownerdata.__ownertd = NULL; } else return (EPERM); } Modified: user/davidxu/libthr/lib/libthr/thread/thr_umtx.c ============================================================================== --- user/davidxu/libthr/lib/libthr/thread/thr_umtx.c Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/thread/thr_umtx.c Fri Nov 12 05:22:27 2010 (r215165) @@ -59,7 +59,7 @@ __thr_umutex_lock(struct umutex *mtx, ui { uint32_t owner; - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { for (;;) { /* wait in kernel */ @@ -84,7 +84,7 @@ __thr_umutex_timedlock(struct umutex *mt for (;;) { if ((mtx->m_flags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { + (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { /* wait in kernel */ ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, @@ -114,7 +114,7 @@ __thr_umutex_unlock(struct umutex *mtx, { #ifndef 
__ia64__ /* XXX this logic has a race-condition on ia64. */ - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED); return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0); Modified: user/davidxu/libthr/lib/libthr/thread/thr_umtx.h ============================================================================== --- user/davidxu/libthr/lib/libthr/thread/thr_umtx.h Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/lib/libthr/thread/thr_umtx.h Fri Nov 12 05:22:27 2010 (r215165) @@ -32,7 +32,7 @@ #include <strings.h> #include <sys/umtx.h> -#define DEFAULT_UMUTEX {0,0,{0,0},0,{0,0,0},{0,0,0}} +#define DEFAULT_UMUTEX {0,0, {0, 0}, {0, 0, 0, 0}} #define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}} typedef uint32_t umtx_t; @@ -81,7 +81,7 @@ _thr_umutex_trylock(struct umutex *mtx, { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); - if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0) + if ((mtx->m_flags & UMUTEX_PRIO_PROTECT2) == 0) return (EBUSY); return (__thr_umutex_trylock(mtx)); } @@ -92,7 +92,7 @@ _thr_umutex_trylock2(struct umutex *mtx, if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0) return (0); if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED && - __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0)) + __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT)) == 0)) if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED)) return (0); return (EBUSY); Modified: user/davidxu/libthr/sys/kern/kern_umtx.c ============================================================================== --- user/davidxu/libthr/sys/kern/kern_umtx.c Fri Nov 12 05:13:46 2010 (r215164) +++ user/davidxu/libthr/sys/kern/kern_umtx.c Fri Nov 12 05:22:27 2010 (r215165) @@ -1247,11 +1247,54 @@ 
kern_umtx_wake(struct thread *td, void * return (0); } +static uint32_t +calc_lockword(uint32_t oldval, uint16_t flags, int qlen, int td_exit, int *nwake) +{ + uint32_t newval; + + if (flags & UMUTEX_ROBUST) { + if (td_exit) { + /* + * Thread is exiting, but did not unlock the mutex, + * mark it in OWNER_DEAD state. + */ + newval = (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_OWNER_DEAD; + *nwake = 1; + } else if ((oldval & UMUTEX_OWNER_DEAD) != 0) { + /* + * if user unlocks it, and previous owner was dead, + * mark it in INCONSISTENT state. + */ + newval = (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_INCONSISTENT; + *nwake = INT_MAX; + return (newval); + } else { + newval = oldval & ~UMUTEX_OWNER_MASK; + *nwake = 1; + } + } else { + *nwake = 1; + newval = oldval & ~UMUTEX_OWNER_MASK; + } + + /* + * When unlocking the umtx, it must be marked as unowned if + * there is zero or one thread only waiting for it. + * Otherwise, it must be marked as contested. + */ + if (qlen <= 1) + newval &= ~UMUTEX_CONTESTED; + else + newval |= UMUTEX_CONTESTED; + + return (newval); +} + /* * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. */ static int -_do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, int timo, +_do_lock_normal(struct thread *td, struct umutex *m, uint16_t flags, int timo, int mode) { struct umtx_q *uq; @@ -1270,42 +1313,36 @@ _do_lock_normal(struct thread *td, struc */ for (;;) { owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); - if (mode == _UMUTEX_WAIT) { - if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED) + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + return (ENOTRECOVERABLE); + } + + if ((owner & UMUTEX_OWNER_MASK) == 0) { + if (mode == _UMUTEX_WAIT) return (0); - } else { /* - * Try the uncontested case. This should be done in userland. + * Try lock it. */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); - + old = casuword32(&m->m_owner, owner, owner|id); /* The acquire succeeded. 
*/ - if (owner == UMUTEX_UNOWNED) + if (owner == old) { + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + return (EOWNERDEAD); return (0); + } /* The address was invalid. */ - if (owner == -1) + if (old == -1) return (EFAULT); - /* If no one owns it but it is contested try to acquire it. */ - if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) - return (0); - - /* The address was invalid. */ - if (owner == -1) - return (EFAULT); - - /* If this failed the lock has changed, restart. */ - continue; - } + /* If this failed the lock has changed, restart. */ + continue; } if ((flags & UMUTEX_ERROR_CHECK) != 0 && - (owner & ~UMUTEX_CONTESTED) == id) + (owner & UMUTEX_OWNER_MASK) == id) return (EDEADLK); if (mode == _UMUTEX_TRY) @@ -1366,19 +1403,6 @@ _do_lock_normal(struct thread *td, struc return (0); } -static void -update_robst(struct umutex *m, int td_exit) -{ - uint32_t robst = fubyte(&m->m_robstate); - - if (robst == UMUTEX_ROBST_NORMAL) { - if (td_exit) - subyte(&m->m_robstate, UMUTEX_ROBST_OWNERDEAD); - } else if (!td_exit && robst == UMUTEX_ROBST_INCONSISTENT) { - subyte(&m->m_robstate, UMUTEX_ROBST_NOTRECOVERABLE); - } -} - /* * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. */ @@ -1386,13 +1410,12 @@ update_robst(struct umutex *m, int td_ex * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. 
*/ static int -do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, +do_unlock_normal(struct thread *td, struct umutex *m, uint16_t flags, int td_exit) { struct umtx_key key; - uint32_t owner, old, id; - int error; - int count; + uint32_t owner, old, id, newval; + int error, count, nwake; if (flags & UMUTEX_SIMPLE) id = UMUTEX_SIMPLE_OWNER; @@ -1405,22 +1428,18 @@ do_unlock_normal(struct thread *td, stru if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & UMUTEX_OWNER_MASK) != id) return (EPERM); - if ((flags & UMUTEX_ROBUST) != 0) - update_robst(m, td_exit); - - if ((owner & UMUTEX_CONTESTED) == 0) { + if ((owner & ~UMUTEX_OWNER_MASK) == 0) { + /* No other bits set, just unlock it. */ old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) return (EFAULT); if (old == owner) return (0); - owner = old; } - /* We should only ever be in here for contested locks */ if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), &key)) != 0) return (error); @@ -1429,16 +1448,13 @@ do_unlock_normal(struct thread *td, stru umtxq_busy(&key); count = umtxq_count(&key); umtxq_unlock(&key); + + owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + newval = calc_lockword(owner, flags, count, td_exit, &nwake); - /* - * When unlocking the umtx, it must be marked as unowned if - * there is zero or one thread only waiting for it. - * Otherwise, it must be marked as contested. - */ - old = casuword32(&m->m_owner, owner, - count <= 1 ? 
UMUTEX_UNOWNED : UMUTEX_CONTESTED); + old = casuword32(&m->m_owner, owner, newval); umtxq_lock(&key); - umtxq_signal(&key,1); + umtxq_signal(&key, nwake); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); @@ -1466,7 +1482,7 @@ do_wake_umutex(struct thread *td, struct if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != 0) + if ((owner & UMUTEX_OWNER_MASK) != 0) return (0); flags = fuword32(&m->m_flags); @@ -1485,7 +1501,7 @@ do_wake_umutex(struct thread *td, struct owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED); umtxq_lock(&key); - if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0) + if (count != 0 && (owner & UMUTEX_OWNER_MASK) == 0) umtxq_signal(&key, 1); umtxq_unbusy(&key); umtxq_unlock(&key); @@ -1887,7 +1903,7 @@ umtx_pi_insert(struct umtx_pi *pi) * Lock a PI mutex. */ static int -_do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo, +_do_lock_pi(struct thread *td, struct umutex *m, uint16_t flags, int timo, int try) { struct umtx_q *uq; @@ -1929,44 +1945,38 @@ _do_lock_pi(struct thread *td, struct um * can fault on any access. */ for (;;) { - /* - * Try the uncontested case. This should be done in userland. - */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); - - /* The acquire succeeded. */ - if (owner == UMUTEX_UNOWNED) { - error = 0; - break; - } - - /* The address was invalid. */ - if (owner == -1) { - error = EFAULT; + owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + error = ENOTRECOVERABLE; break; } - /* If no one owns it but it is contested try to acquire it. 
*/ - if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) { - umtxq_lock(&uq->uq_key); - umtxq_busy(&uq->uq_key); - error = umtx_pi_claim(pi, td); - umtxq_unbusy(&uq->uq_key); - umtxq_unlock(&uq->uq_key); + if ((owner & UMUTEX_OWNER_MASK) == 0) { + old = casuword32(&m->m_owner, owner, id|owner); + /* The acquire succeeded. */ + if (owner == old) { + if ((owner & UMUTEX_CONTESTED) != 0) { + umtxq_lock(&uq->uq_key); + umtxq_busy(&uq->uq_key); + umtx_pi_claim(pi, td); + umtxq_unbusy(&uq->uq_key); + umtxq_unlock(&uq->uq_key); + } + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + error = EOWNERDEAD; + else + error = 0; break; } /* The address was invalid. */ - if (owner == -1) { + if (old == -1) { error = EFAULT; break; } - /* If this failed the lock has changed, restart. */ continue; } @@ -2042,9 +2052,8 @@ do_unlock_pi(struct thread *td, struct u struct umtx_key key; struct umtx_q *uq_first, *uq_first2, *uq_me; struct umtx_pi *pi, *pi2; - uint32_t owner, old, id; - int error; - int count; + uint32_t owner, old, id, newval; + int error, count, nwake; int pri; id = td->td_tid; @@ -2055,14 +2064,10 @@ do_unlock_pi(struct thread *td, struct u if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & UMUTEX_OWNER_MASK) != id) return (EPERM); - if ((flags & UMUTEX_ROBUST) != 0) - update_robst(m, td_exit); - - /* This should be done in userland */ - if ((owner & UMUTEX_CONTESTED) == 0) { + if ((owner & ~UMUTEX_OWNER_MASK) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) return (EFAULT); @@ -2071,7 +2076,6 @@ do_unlock_pi(struct thread *td, struct u owner = old; } - /* We should only ever be in here for contested locks */ if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags), &key)) != 0) return (error); @@ -2117,13 +2121,15 @@ do_unlock_pi(struct thread *td, struct u } umtxq_unlock(&key); + 
owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + newval = calc_lockword(owner, flags, count, td_exit, &nwake); + /* * When unlocking the umtx, it must be marked as unowned if * there is zero or one thread only waiting for it. * Otherwise, it must be marked as contested. */ - old = casuword32(&m->m_owner, owner, - count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); + old = casuword32(&m->m_owner, owner, newval); umtxq_lock(&key); umtxq_unbusy(&key); @@ -2136,6 +2142,12 @@ do_unlock_pi(struct thread *td, struct u return (0); } +struct old_pp_mutex { + volatile __lwpid_t m_owner; /* Owner of the mutex */ + __uint32_t m_flags; /* Flags of the mutex */ + __uint32_t m_ceilings[2]; /* Priority protect ceiling */ +}; + /* * Lock a PP mutex. */ @@ -2146,8 +2158,9 @@ _do_lock_pp(struct thread *td, struct um struct umtx_q *uq, *uq2; struct umtx_pi *pi; uint32_t ceiling; - uint32_t owner, id; + uint32_t owner, id, old; int error, pri, old_inherited_pri, su; + struct old_pp_mutex *oldmtx = (struct old_pp_mutex *)m; if (flags & UMUTEX_SIMPLE) id = UMUTEX_SIMPLE_OWNER; @@ -2159,12 +2172,19 @@ _do_lock_pp(struct thread *td, struct um return (error); su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); for (;;) { - old_inherited_pri = uq->uq_inherited_pri; + /* + * We busy the lock, so no one can change the priority ceiling + * while we are locking it. 
+ */ umtxq_lock(&uq->uq_key); umtxq_busy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]); + old_inherited_pri = uq->uq_inherited_pri; + if (flags & UMUTEX_PRIO_PROTECT) + ceiling = RTP_PRIO_MAX - fuword32(&oldmtx->m_ceilings[0]); + else + ceiling = RTP_PRIO_MAX - fubyte(&m->m_ceilings[0]); if (ceiling > RTP_PRIO_MAX) { error = EINVAL; goto out; @@ -2185,18 +2205,35 @@ _do_lock_pp(struct thread *td, struct um } mtx_unlock_spin(&umtx_lock); - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) { - error = 0; +again: + owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201011120522.oAC5MRub096966>