Date: Sat, 20 Nov 2004 02:14:53 GMT
From: David Xu <davidxu@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 65486 for review
Message-ID: <200411200214.iAK2ErOL099137@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=65486

Change 65486 by davidxu@davidxu_alona on 2004/11/20 02:14:17

    1. Use per-chain mutex lock.
    2. Fix race conditions.

Affected files ...

.. //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#2 edit

Differences ...

==== //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#2 (text+ko) ====

@@ -49,35 +49,67 @@
     pid_t    uq_pid;        /* Pid key component. */
 };
 
-#define UMTX_QUEUES    128
-#define UMTX_HASH(pid, umtx)    \
-    (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)
+LIST_HEAD(umtx_head, umtx_q);
+struct umtxq_chain {
+    struct umtx_head    uc_queues;    /* List of sleep queues. */
+    struct mtx          uc_lock;      /* Lock for this chain. */
+};
+
+#define GOLDEN_RATIO_PRIME    2654404609U
+#define UMTX_QUEUES           128
+#define UMTX_SHIFTS           (__WORD_BIT - 7)
 
-LIST_HEAD(umtx_head, umtx_q);
-static struct umtx_head queues[UMTX_QUEUES];
+static struct umtxq_chain umtxq_chains[UMTX_QUEUES];
 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
 
-static struct mtx umtx_lock;
-MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
+#define UMTX_LOCK(chain)    \
+    mtx_lock(&umtxq_chains[chain].uc_lock)
+#define UMTX_UNLOCK(chain)    \
+    mtx_unlock(&umtxq_chains[chain].uc_lock)
+#define UMTX_MUTEX(chain)    \
+    (&umtxq_chains[chain].uc_lock)
+
+#define UMTX_CONTESTED    LONG_MIN
+
+static void umtx_queues_init(void *);
+static int umtx_hash(unsigned, void *);
+static struct umtx_q *umtx_lookup(struct thread *, struct umtx *, int);
+static struct umtx_q *umtx_insert(struct thread *, struct umtx *, int);
+
+SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_queues_init, NULL);
+
+static void
+umtx_queues_init(void *arg)
+{
+    int i;
 
-#define UMTX_LOCK()    mtx_lock(&umtx_lock);
-#define UMTX_UNLOCK()    mtx_unlock(&umtx_lock);
+    for (i = 0; i < UMTX_QUEUES; ++i) {
+        LIST_INIT(&umtxq_chains[i].uc_queues);
+        mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
+            MTX_DEF | MTX_DUPOK);
+    }
+}
 
-#define UMTX_CONTESTED    LONG_MIN
+static inline int
+umtx_hash(unsigned pid, void *umtx)
+{
+    unsigned n = (unsigned)umtx + pid;
 
-static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
-static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
+    return (n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS;
+}
 
 static struct umtx_q *
-umtx_lookup(struct thread *td, struct umtx *umtx)
+umtx_lookup(struct thread *td, struct umtx *umtx, int chain)
 {
     struct umtx_head *head;
     struct umtx_q *uq;
     pid_t pid;
 
+    mtx_assert(UMTX_MUTEX(chain), MA_OWNED);
+
     pid = td->td_proc->p_pid;
-    head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];
+    head = &umtxq_chains[chain].uc_queues;
     LIST_FOREACH(uq, head, uq_next) {
         if (uq->uq_pid == pid && uq->uq_umtx == umtx)
@@ -91,7 +123,7 @@
  * Insert a thread onto the umtx queue.
  */
 static struct umtx_q *
-umtx_insert(struct thread *td, struct umtx *umtx)
+umtx_insert(struct thread *td, struct umtx *umtx, int chain)
 {
     struct umtx_head *head;
     struct umtx_q *uq;
@@ -99,19 +131,19 @@
     pid = td->td_proc->p_pid;
-    if ((uq = umtx_lookup(td, umtx)) == NULL) {
+    if ((uq = umtx_lookup(td, umtx, chain)) == NULL) {
         struct umtx_q *ins;
 
-        UMTX_UNLOCK();
+        UMTX_UNLOCK(chain);
         ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
-        UMTX_LOCK();
+        UMTX_LOCK(chain);
 
         /*
          * Some one else could have succeeded while we were blocked
          * waiting on memory.
         */
-        if ((uq = umtx_lookup(td, umtx)) == NULL) {
-            head = &queues[UMTX_HASH(pid, umtx)];
+        if ((uq = umtx_lookup(td, umtx, chain)) == NULL) {
+            head = &umtxq_chains[chain].uc_queues;
             uq = ins;
             uq->uq_pid = pid;
             uq->uq_umtx = umtx;
@@ -130,14 +162,17 @@
 }
 
 static void
-umtx_remove(struct umtx_q *uq, struct thread *td)
+umtx_remove(struct umtx_q *uq, struct thread *td, struct umtx *umtx,
+    int chain)
 {
     TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
 
     if (TAILQ_EMPTY(&uq->uq_tdq)) {
         LIST_REMOVE(uq, uq_next);
+        UMTX_UNLOCK(chain);
         free(uq, M_UMTX);
-    }
+    } else
+        UMTX_UNLOCK(chain);
 }
@@ -148,7 +183,8 @@
     struct umtx *umtx;
     intptr_t owner;
     intptr_t old;
-    int error;
+    int chain;
+    int error = 0;
 
     uq = NULL;
@@ -157,6 +193,7 @@
      * can fault on any access.
      */
     umtx = uap->umtx;
+    chain = umtx_hash(td->td_proc->p_pid, umtx);
 
     for (;;) {
         /*
@@ -165,34 +202,40 @@
         owner = casuptr((intptr_t *)&umtx->u_owner, UMTX_UNOWNED,
             td->td_tid);
 
+        /* The acquire succeeded. */
+        if (owner == UMTX_UNOWNED)
+            return (0);
+
         /* The address was invalid. */
         if (owner == -1)
             return (EFAULT);
 
-        /* The acquire succeeded. */
-        if (owner == UMTX_UNOWNED)
-            return (0);
-
         /* If no one owns it but it is contested try to acquire it. */
         if (owner == UMTX_CONTESTED) {
             owner = casuptr((intptr_t *)&umtx->u_owner,
                 UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);
 
+            if (owner == UMTX_CONTESTED)
+                return (0);
+
             /* The address was invalid. */
             if (owner == -1)
                 return (EFAULT);
 
-            if (owner == UMTX_CONTESTED)
-                return (0);
-
             /* If this failed the lock has changed, restart. */
             continue;
         }
 
+        /*
+         * If we caught a signal, we have retried and now
+         * exit immediately.
+         */
+        if (error)
+            return (error);
 
-        UMTX_LOCK();
-        uq = umtx_insert(td, umtx);
-        UMTX_UNLOCK();
+        UMTX_LOCK(chain);
+        uq = umtx_insert(td, umtx, chain);
+        UMTX_UNLOCK(chain);
 
         /*
         * Set the contested bit so that a release in user space
@@ -205,9 +248,8 @@
 
         /* The address was invalid. */
         if (old == -1) {
-            UMTX_LOCK();
-            umtx_remove(uq, td);
-            UMTX_UNLOCK();
+            UMTX_LOCK(chain);
+            umtx_remove(uq, td, umtx, chain);
             return (EFAULT);
         }
@@ -216,24 +258,28 @@
         * and we need to retry or we lost a race to the thread
         * unlocking the umtx.
         */
-        PROC_LOCK(td->td_proc);
+        UMTX_LOCK(chain);
         if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
-            error = msleep(td, &td->td_proc->p_mtx,
+            error = msleep(td, UMTX_MUTEX(chain),
                 td->td_priority | PCATCH, "umtx", 0);
         else
             error = 0;
-        mtx_lock_spin(&sched_lock);
-        td->td_flags &= ~TDF_UMTXWAKEUP;
-        mtx_unlock_spin(&sched_lock);
-        PROC_UNLOCK(td->td_proc);
+        umtx_remove(uq, td, umtx, chain);
 
-        UMTX_LOCK();
-        umtx_remove(uq, td);
-        UMTX_UNLOCK();
+        if (td->td_flags & TDF_UMTXWAKEUP) {
+            /*
+             * If we were resumed by umtx_unlock, we should retry
+             * to avoid a race.
+             */
+            mtx_lock_spin(&sched_lock);
+            td->td_flags &= ~TDF_UMTXWAKEUP;
+            mtx_unlock_spin(&sched_lock);
+            continue;
+        }
 
         /*
-         * If we caught a signal we might have to retry or exit
-         * immediately.
+         * If we caught a signal without being resumed by umtx_unlock,
+         * exit immediately.
         */
         if (error)
             return (error);
@@ -246,11 +292,12 @@
 _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
     /* struct umtx *umtx */
 {
-    struct thread *blocked;
+    struct thread *blocked, *blocked2;
     struct umtx *umtx;
     struct umtx_q *uq;
     intptr_t owner;
     intptr_t old;
+    int chain;
 
     umtx = uap->umtx;
@@ -269,63 +316,72 @@
     /* We should only ever be in here for contested locks */
     if ((owner & UMTX_CONTESTED) == 0)
         return (EINVAL);
-    blocked = NULL;
 
     /*
     * When unlocking the umtx, it must be marked as unowned if
     * there is zero or one thread only waiting for it.
     * Otherwise, it must be marked as contested.
     */
-    UMTX_LOCK();
-    uq = umtx_lookup(td, umtx);
-    if (uq == NULL ||
-        (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
-        TAILQ_NEXT(blocked, td_umtx) == NULL)) {
-        UMTX_UNLOCK();
-        old = casuptr((intptr_t *)&umtx->u_owner, owner,
-            UMTX_UNOWNED);
-        if (old == -1)
-            return (EFAULT);
-        if (old != owner)
-            return (EINVAL);
+    old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_UNOWNED);
+    if (old == -1)
+        return (EFAULT);
+    if (old != owner)
+        return (EINVAL);
+
+    chain = umtx_hash(td->td_proc->p_pid, umtx);
+
+    /*
+     * At this point, a new thread can lock the umtx before we
+     * reach here, so the contested bit will not be set; if there
+     * are two or more threads on the wait queue, we should set
+     * the contested bit for them.
+     */
+    blocked = NULL;
+    UMTX_LOCK(chain);
+    uq = umtx_lookup(td, umtx, chain);
+    if (uq == NULL) {
+        UMTX_UNLOCK(chain);
+        return (0);
+    }
+    blocked2 = NULL;
+    if ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL) {
+        mtx_lock_spin(&sched_lock);
+        blocked->td_flags |= TDF_UMTXWAKEUP;
+        mtx_unlock_spin(&sched_lock);
+        blocked2 = TAILQ_NEXT(blocked, td_umtx);
+    }
+    UMTX_UNLOCK(chain);
+    /*
+     * If there is a second thread waiting on the umtx, set the contested
+     * bit; if it is resumed before we reach here, that is harmless,
+     * just a bit inefficient.
+     */
+    if (blocked2 != NULL) {
+        owner = UMTX_UNOWNED;
+        for (;;) {
+            old = casuptr((intptr_t *)&umtx->u_owner, owner,
+                owner | UMTX_CONTESTED);
+            if (old == -1)
+                return (EFAULT);
+            if (old == owner)
+                break;
+            owner = old;
+        }
         /*
-         * Recheck the umtx queue to make sure another thread
-         * didn't put itself on it after it was unlocked.
+         * Another thread locked the umtx before us, so don't bother
+         * to wake more threads; that thread will do it when it unlocks
+         * the umtx.
         */
-        UMTX_LOCK();
-        uq = umtx_lookup(td, umtx);
-        if (uq != NULL &&
-            ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
-            TAILQ_NEXT(blocked, td_umtx) != NULL)) {
-            UMTX_UNLOCK();
-            old = casuptr((intptr_t *)&umtx->u_owner,
-                UMTX_UNOWNED, UMTX_CONTESTED);
-        } else {
-            UMTX_UNLOCK();
-        }
-    } else {
-        UMTX_UNLOCK();
-        old = casuptr((intptr_t *)&umtx->u_owner,
-            owner, UMTX_CONTESTED);
-        if (old != -1 && old != owner)
-            return (EINVAL);
+        if ((owner & ~UMTX_CONTESTED) != 0)
+            return (0);
     }
 
-    if (old == -1)
-        return (EFAULT);
-
     /*
     * If there is a thread waiting on the umtx, wake it up.
     */
-    if (blocked != NULL) {
-        PROC_LOCK(blocked->td_proc);
-        mtx_lock_spin(&sched_lock);
-        blocked->td_flags |= TDF_UMTXWAKEUP;
-        mtx_unlock_spin(&sched_lock);
-        PROC_UNLOCK(blocked->td_proc);
+    if (blocked != NULL)
         wakeup(blocked);
-    }
 
     return (0);
 }
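
For reviewers who want to poke at the new hashing scheme outside the kernel: below is a
small stand-alone userland sketch of umtx_hash() as it appears in the diff. The constants
are copied from the change; WORD_BIT_APPROX is a userland stand-in for the kernel's
__WORD_BIT, and the main() driver is purely illustrative and not part of the change.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME  2654404609U
#define UMTX_QUEUES         128
#define WORD_BIT_APPROX     (sizeof(unsigned) * CHAR_BIT)  /* __WORD_BIT in the kernel */
#define UMTX_SHIFTS         (WORD_BIT_APPROX - 7)          /* keep the top 7 bits: 0..127 */

/* Userland copy of the hash in the diff: mix pid and address, keep the top bits. */
static unsigned
umtx_hash_sketch(unsigned pid, const void *umtx)
{
    unsigned n = (unsigned)(uintptr_t)umtx + pid;

    return ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS);
}

int
main(void)
{
    int dummy[8];
    unsigned i;

    /* Nearby addresses and pids should scatter across the 128 chains. */
    for (i = 0; i < 8; i++)
        printf("pid %u, addr %p -> chain %u of %d\n", 1000 + i,
            (void *)&dummy[i], umtx_hash_sketch(1000 + i, &dummy[i]),
            UMTX_QUEUES);
    return (0);
}

With a 32-bit unsigned, keeping the top 7 bits of the multiplicative hash yields a chain
index in 0..127, which is why UMTX_SHIFTS is defined as __WORD_BIT - 7 for
UMTX_QUEUES == 128.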
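
To see why _umtx_lock() must set UMTX_CONTESTED before sleeping, and why _umtx_unlock()
re-sets it when a second waiter remains, here is a hedged sketch of the user-space half of
the protocol. This is not the actual libthr code: umtx_lock_slow()/umtx_unlock_slow() are
hypothetical stubs standing in for the _umtx_lock/_umtx_unlock system calls, UMTX_UNOWNED
is assumed to be zero, and the GCC __sync builtin stands in for whatever atomic the library
really uses. The point is only that the uncontested paths are a single compare-and-swap on
u_owner, so the owner never enters the kernel on unlock unless the contested bit is set.

#include <stdint.h>

#define UMTX_UNOWNED    0               /* assumed value for this sketch */
#define UMTX_CONTESTED  INTPTR_MIN      /* matches LONG_MIN in the kernel diff */

struct umtx_sketch {
    volatile intptr_t u_owner;          /* owning thread id, possibly | UMTX_CONTESTED */
};

/*
 * Placeholder slow paths; in the real library these would be the
 * _umtx_lock()/_umtx_unlock() system calls shown in the diff.
 */
static int
umtx_lock_slow(struct umtx_sketch *m, intptr_t tid)
{
    (void)m; (void)tid;
    return (0);
}

static int
umtx_unlock_slow(struct umtx_sketch *m, intptr_t tid)
{
    (void)m; (void)tid;
    return (0);
}

static int
umtx_lock_sketch(struct umtx_sketch *m, intptr_t tid)
{
    /* Fast path: take an unowned lock with one compare-and-swap. */
    if (__sync_bool_compare_and_swap(&m->u_owner, UMTX_UNOWNED, tid))
        return (0);
    /* Contended: the kernel queues us and sets UMTX_CONTESTED. */
    return (umtx_lock_slow(m, tid));
}

static int
umtx_unlock_sketch(struct umtx_sketch *m, intptr_t tid)
{
    /*
     * Fast path: if no waiter set the contested bit while we held the
     * lock, release it with one compare-and-swap, never entering the kernel.
     */
    if (__sync_bool_compare_and_swap(&m->u_owner, tid, UMTX_UNOWNED))
        return (0);
    /* Contested: the kernel must clear the word and wake a waiter. */
    return (umtx_unlock_slow(m, tid));
}

int
main(void)
{
    struct umtx_sketch m = { UMTX_UNOWNED };
    intptr_t tid = 1;                   /* stand-in thread id */

    umtx_lock_sketch(&m, tid);
    umtx_unlock_sketch(&m, tid);
    return (0);
}

That is exactly the race the unlock path in this change closes: after clearing u_owner it
re-checks the wait queue and, if a second waiter is still queued, loops with casuptr() to
set UMTX_CONTESTED again so the next unlock cannot take the fast path and skip the wakeup.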