From: Konstantin Belousov <kib@FreeBSD.org>
Date: Sat, 28 Feb 2015 04:19:03 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r279390 - head/sys/kern
Message-Id: <201502280419.t1S4J3W5010688@svn.freebsd.org>

Author: kib
Date: Sat Feb 28 04:19:02 2015
New Revision: 279390
URL: https://svnweb.freebsd.org/changeset/base/279390

Log:
  The umtx_lock mutex is used by the top half of the kernel, but is
  currently a spin lock.  Apparently, the only reason for this is that
  umtx_thread_exit() is called under the process spinlock, which puts
  that requirement on umtx_lock.  Note that the witness static order
  list is wrong for umtx_lock: umtx_lock is explicitly ordered before
  any thread lock, so it is also before the sleepqueue locks.

  Change umtx_lock to be a sleepable mutex.  For the reason above, the
  calls to umtx_thread_exit() are moved out of thread_exit() and into
  each caller, to a point where the process spin lock is not yet taken.

  Discussed with:	jhb
  Tested by:	pho (previous version)
  Sponsored by:	The FreeBSD Foundation
  MFC after:	3 weeks
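A minimal sketch of the two mtx(9) flavors involved follows; it is not part
of the commit, and the example_* names are hypothetical.  Before this change
umtx_lock was initialized with MTX_SPIN and taken with mtx_lock_spin() and
mtx_unlock_spin(); afterwards it is initialized with MTX_DEF and taken with
mtx_lock() and mtx_unlock(), which is the mechanical substitution made
throughout kern_umtx.c in the diff below.

/*
 * Sketch only, assuming the standard mtx(9) API; not part of r279390.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin;		/* MTX_SPIN: contenders busy-wait */
static struct mtx example_def;		/* MTX_DEF: contenders may block */

static void
example_mtx_init(void *arg __unused)
{

	mtx_init(&example_spin, "example spin", NULL, MTX_SPIN);
	mtx_init(&example_def, "example def", NULL, MTX_DEF);
}
SYSINIT(example_mtx, SI_SUB_LOCK, SI_ORDER_ANY, example_mtx_init, NULL);

static void
example_mtx_use(void)
{

	/* Spin mutexes must use the _spin lock/unlock variants. */
	mtx_lock_spin(&example_spin);
	mtx_unlock_spin(&example_spin);

	/* Default mutexes use the plain variants. */
	mtx_lock(&example_def);
	mtx_unlock(&example_def);
}

The practical difference for this commit is lock ordering: a contender on a
default mutex may block, so such a mutex belongs with the sleep mutexes in
the witness order list and must not be acquired while a spin lock, such as
the process spinlock, is held.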
Modified:
  head/sys/kern/kern_exit.c
  head/sys/kern/kern_kthread.c
  head/sys/kern/kern_thr.c
  head/sys/kern/kern_thread.c
  head/sys/kern/kern_umtx.c
  head/sys/kern/subr_witness.c

Modified: head/sys/kern/kern_exit.c
==============================================================================
--- head/sys/kern/kern_exit.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/kern_exit.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/umtx.h>
 #ifdef KTRACE
 #include
 #endif
@@ -633,6 +634,7 @@ exit1(struct thread *td, int rv)
 	wakeup(p->p_pptr);
 	cv_broadcast(&p->p_pwait);
 	sched_exit(p->p_pptr, td);
+	umtx_thread_exit(td);
 	PROC_SLOCK(p);
 	p->p_state = PRS_ZOMBIE;
 	PROC_UNLOCK(p->p_pptr);

Modified: head/sys/kern/kern_kthread.c
==============================================================================
--- head/sys/kern/kern_kthread.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/kern_kthread.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/umtx.h>
 #include
 #include
 #include
@@ -339,6 +340,7 @@ kthread_exit(void)
 	}
 	LIST_REMOVE(curthread, td_hash);
 	rw_wunlock(&tidhash_lock);
+	umtx_thread_exit(curthread);
 	PROC_SLOCK(p);
 	thread_exit();
 }

Modified: head/sys/kern/kern_thr.c
==============================================================================
--- head/sys/kern/kern_thr.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/kern_thr.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -322,6 +322,7 @@ sys_thr_exit(struct thread *td, struct t
 		LIST_REMOVE(td, td_hash);
 		rw_wunlock(&tidhash_lock);
 		tdsigcleanup(td);
+		umtx_thread_exit(td);
 		PROC_SLOCK(p);
 		thread_stopped(p);
 		thread_exit();

Modified: head/sys/kern/kern_thread.c
==============================================================================
--- head/sys/kern/kern_thread.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/kern_thread.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -413,7 +413,6 @@ thread_exit(void)
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-	umtx_thread_exit(td);
 	/*
 	 * drop FPU & debug register state storage, or any other
 	 * architecture specific resources that
@@ -864,6 +863,7 @@ thread_suspend_check(int return_instead)
 			tidhash_remove(td);
 			PROC_LOCK(p);
 			tdsigcleanup(td);
+			umtx_thread_exit(td);
 			PROC_SLOCK(p);
 			thread_stopped(p);
 			thread_exit();

Modified: head/sys/kern/kern_umtx.c
==============================================================================
--- head/sys/kern/kern_umtx.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/kern_umtx.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -396,7 +396,7 @@ umtxq_sysinit(void *arg __unused)
 #ifdef UMTX_PROFILING
 	umtx_init_profiling();
 #endif
-	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
+	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
 	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
 	    EVENTHANDLER_PRI_ANY);
 }
@@ -1467,9 +1467,9 @@ umtx_pi_claim(struct umtx_pi *pi, struct
 	struct umtx_q *uq, *uq_owner;

 	uq_owner = owner->td_umtxq;
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	if (pi->pi_owner == owner) {
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		return (0);
 	}

@@ -1477,7 +1477,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct
 		/*
 		 * userland may have already messed the mutex, sigh.
 		 */
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		return (EPERM);
 	}
 	umtx_pi_setowner(pi, owner);
@@ -1491,7 +1491,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct
 			sched_lend_user_prio(owner, pri);
 		thread_unlock(owner);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	return (0);
 }
@@ -1506,7 +1506,7 @@ umtx_pi_adjust(struct thread *td, u_char
 	struct umtx_pi *pi;

 	uq = td->td_umtxq;
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	/*
 	 * Pick up the lock that td is blocked on.
 	 */
@@ -1515,7 +1515,7 @@ umtx_pi_adjust(struct thread *td, u_char
 		umtx_pi_adjust_thread(pi, td);
 		umtx_repropagate_priority(pi);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 }

 /*
@@ -1537,12 +1537,12 @@ umtxq_sleep_pi(struct umtx_q *uq, struct
 	UMTXQ_LOCKED_ASSERT(uc);
 	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
 	umtxq_insert(uq);
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	if (pi->pi_owner == NULL) {
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		/* XXX Only look up thread in current process. */
 		td1 = tdfind(owner, curproc->p_pid);
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (td1 != NULL) {
 			if (pi->pi_owner == NULL)
 				umtx_pi_setowner(pi, td1);
@@ -1566,20 +1566,20 @@ umtxq_sleep_pi(struct umtx_q *uq, struct
 	td->td_flags |= TDF_UPIBLOCKED;
 	thread_unlock(td);
 	umtx_propagate_priority(td);
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	umtxq_unbusy(&uq->uq_key);

 	error = umtxq_sleep(uq, wmesg, timo);
 	umtxq_remove(uq);

-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	uq->uq_pi_blocked = NULL;
 	thread_lock(td);
 	td->td_flags &= ~TDF_UPIBLOCKED;
 	thread_unlock(td);
 	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
 	umtx_repropagate_priority(pi);
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);

 	umtxq_unlock(&uq->uq_key);
 	return (error);
@@ -1611,7 +1611,7 @@ umtx_pi_unref(struct umtx_pi *pi)
 	UMTXQ_LOCKED_ASSERT(uc);
 	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
 	if (--pi->pi_refcount == 0) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (pi->pi_owner != NULL) {
 			TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
 			    pi, pi_link);
@@ -1619,7 +1619,7 @@ umtx_pi_unref(struct umtx_pi *pi)
 		}
 		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
 		    ("blocked queue not empty"));
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
 		umtx_pi_free(pi);
 	}
@@ -1873,11 +1873,11 @@ do_unlock_pi(struct thread *td, struct u
 	umtxq_busy(&key);
 	count = umtxq_count_pi(&key, &uq_first);
 	if (uq_first != NULL) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		pi = uq_first->uq_pi_blocked;
 		KASSERT(pi != NULL, ("pi == NULL?"));
 		if (pi->pi_owner != curthread) {
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 			umtxq_unbusy(&key);
 			umtxq_unlock(&key);
 			umtx_key_release(&key);
@@ -1903,7 +1903,7 @@ do_unlock_pi(struct thread *td, struct u
 		thread_lock(curthread);
 		sched_lend_user_prio(curthread, pri);
 		thread_unlock(curthread);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		if (uq_first)
 			umtxq_signal_thread(uq_first);
 	} else {
@@ -1920,10 +1920,10 @@ do_unlock_pi(struct thread *td, struct u
 			 * umtx_pi, and unlocked the umtxq.
 			 * If the current thread owns it, it must disown it.
 			 */
-			mtx_lock_spin(&umtx_lock);
+			mtx_lock(&umtx_lock);
 			if (pi->pi_owner == td)
 				umtx_pi_disown(pi);
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 		}
 	}
 	umtxq_unlock(&key);
@@ -1986,9 +1986,9 @@ do_lock_pp(struct thread *td, struct umu
 			goto out;
 		}

-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 			error = EINVAL;
 			goto out;
 		}
@@ -1999,7 +1999,7 @@ do_lock_pp(struct thread *td, struct umu
 				sched_lend_user_prio(td, uq->uq_inherited_pri);
 			thread_unlock(td);
 		}
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);

 		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
 		    id | UMUTEX_CONTESTED);
@@ -2034,7 +2034,7 @@ do_lock_pp(struct thread *td, struct umu
 		umtxq_remove(uq);
 		umtxq_unlock(&uq->uq_key);

-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		uq->uq_inherited_pri = old_inherited_pri;
 		pri = PRI_MAX;
 		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2049,11 +2049,11 @@ do_lock_pp(struct thread *td, struct umu
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}

 	if (error != 0) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		uq->uq_inherited_pri = old_inherited_pri;
 		pri = PRI_MAX;
 		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2068,7 +2068,7 @@ do_lock_pp(struct thread *td, struct umu
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}

 out:
@@ -2140,7 +2140,7 @@ do_unlock_pp(struct thread *td, struct u
 	if (error == -1)
 		error = EFAULT;
 	else {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (su != 0)
 			uq->uq_inherited_pri = new_inherited_pri;
 		pri = PRI_MAX;
@@ -2156,7 +2156,7 @@ do_unlock_pp(struct thread *td, struct u
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}
 	umtx_key_release(&key);
 	return (error);
@@ -3841,13 +3841,13 @@ umtx_thread_cleanup(struct thread *td)
 	if ((uq = td->td_umtxq) == NULL)
 		return;

-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	uq->uq_inherited_pri = PRI_MAX;
 	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
 		pi->pi_owner = NULL;
 		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	thread_lock(td);
 	sched_lend_user_prio(td, PRI_MAX);
 	thread_unlock(td);

Modified: head/sys/kern/subr_witness.c
==============================================================================
--- head/sys/kern/subr_witness.c	Sat Feb 28 00:31:01 2015	(r279389)
+++ head/sys/kern/subr_witness.c	Sat Feb 28 04:19:02 2015	(r279390)
@@ -492,6 +492,11 @@ static struct witness_order_list_entry o
 	{ "time lock", &lock_class_mtx_sleep },
 	{ NULL, NULL },
 	/*
+	 * umtx
+	 */
+	{ "umtx lock", &lock_class_mtx_sleep },
+	{ NULL, NULL },
+	/*
 	 * Sockets
 	 */
 	{ "accept", &lock_class_mtx_sleep },
@@ -637,7 +642,6 @@ static struct witness_order_list_entry o
 #endif
 	{ "process slock", &lock_class_mtx_spin },
 	{ "sleepq chain", &lock_class_mtx_spin },
-	{ "umtx lock", &lock_class_mtx_spin },
 	{ "rm_spinlock", &lock_class_mtx_spin },
 	{ "turnstile chain", &lock_class_mtx_spin },
 	{ "turnstile lock", &lock_class_mtx_spin },
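
As a reading aid for the caller-side changes, here is a minimal sketch of the
ordering the patch establishes in every exit path.  It is not part of the
commit: example_exit_path() is a hypothetical stand-in for exit1(),
kthread_exit(), sys_thr_exit() and thread_suspend_check(), and the real
callers hold additional locks (notably the proc lock) and do more work.

/*
 * Sketch only: umtx_thread_exit() now runs before the process spin lock
 * is taken, so the sleepable umtx_lock is never acquired under a spin lock.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/umtx.h>

static void
example_exit_path(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	/* Drop the thread's umtx PI state; may take the sleepable umtx_lock. */
	umtx_thread_exit(td);
	/* Only now take the process spin lock; no blockable mutex after this. */
	PROC_SLOCK(p);
	thread_exit();		/* does not return */
}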