From: Alan Cox <alc@FreeBSD.org>
Date: Tue, 29 May 2012 01:52:38 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r236214 - in head/sys/sparc64: include sparc64
Message-Id: <201205290152.q4T1qcl7045707@svn.freebsd.org>

Author: alc
Date: Tue May 29 01:52:38 2012
New Revision: 236214
URL: http://svn.freebsd.org/changeset/base/236214

Log:
  Replace all uses of the vm page queues lock by a r/w lock that is
  private to this pmap.c.  This new r/w lock is used primarily to
  synchronize access to the TTE lists.  However, it will be used in a
  somewhat unconventional way.  As finer-grained TTE list locking is
  added to each of the pmap functions that acquire this r/w lock, its
  acquisition will be changed from write to read, enabling concurrent
  execution of the pmap functions with finer-grained locking.

  Reviewed by:	attilio
  Tested by:	flo
  MFC after:	10 days

Modified:
  head/sys/sparc64/include/pmap.h
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sparc64/sparc64/tsb.c

Modified: head/sys/sparc64/include/pmap.h
==============================================================================
--- head/sys/sparc64/include/pmap.h	Tue May 29 01:48:06 2012	(r236213)
+++ head/sys/sparc64/include/pmap.h	Tue May 29 01:52:38 2012	(r236214)
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include <sys/_rwlock.h>
 
 #include
 #include
@@ -101,6 +102,7 @@ void pmap_set_kctx(void);
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
+extern	struct rwlock tte_list_global_lock;
 extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Tue May 29 01:48:06 2012	(r236213)
+++ head/sys/sparc64/sparc64/pmap.c	Tue May 29 01:52:38 2012	(r236214)
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -134,6 +135,11 @@ vm_offset_t vm_max_kernel_address;
 struct pmap kernel_pmap_store;
 
 /*
+ * Global tte list lock
+ */
+struct rwlock tte_list_global_lock;
+
+/*
  * Allocate physical memory for use in pmap_bootstrap.
  */
 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors);

@@ -666,6 +672,11 @@ pmap_bootstrap(u_int cpu_impl)
 		pm->pm_context[i] = TLB_CTX_KERNEL;
 	CPU_FILL(&pm->pm_active);

+	/*
+	 * Initialize the global tte list lock.
+	 */
+	rw_init(&tte_list_global_lock, "tte list global");
+
 	/*
 	 * Flush all non-locked TLB entries possibly left over by the
 	 * firmware.
@@ -876,7 +887,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_
 	struct tte *tp;
 	int color;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_cache_enter: fake page"));
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -951,7 +962,7 @@ pmap_cache_remove(vm_page_t m, vm_offset
 	struct tte *tp;
 	int color;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
 	    m->md.colors[DCACHE_COLOR(va)]);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1026,7 +1037,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 	vm_page_t om;
 	u_long data;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_STATS_INC(pmap_nkenter);
 	tp = tsb_kvtotte(va);
 	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1088,7 +1099,7 @@ pmap_kremove(vm_offset_t va)
 	struct tte *tp;
 	vm_page_t m;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_STATS_INC(pmap_nkremove);
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1139,19 +1150,16 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	int locked;

 	PMAP_STATS_INC(pmap_nqenter);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	while (count-- > 0) {
 		pmap_kenter(va, *m);
 		va += PAGE_SIZE;
 		m++;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	tlb_range_demap(kernel_pmap, sva, va);
 }

@@ -1163,18 +1171,15 @@ void
 pmap_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
-	int locked;

 	PMAP_STATS_INC(pmap_nqremove);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	tlb_range_demap(kernel_pmap, sva, va);
 }

@@ -1322,7 +1327,7 @@ pmap_remove_tte(struct pmap *pm, struct
 	vm_page_t m;
 	u_long data;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	data = atomic_readandclear_long(&tp->tte_data);
 	if ((data & TD_FAKE) == 0) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1359,7 +1364,7 @@ pmap_remove(pmap_t pm, vm_offset_t start
 	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	if (end - start > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1372,7 +1377,7 @@ pmap_remove(pmap_t pm, vm_offset_t start
 		tlb_range_demap(pm, start, end - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }

 void
@@ -1385,7 +1390,7 @@ pmap_remove_all(vm_page_t m)

 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
@@ -1408,7 +1413,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pm);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }

 static int
@@ -1470,10 +1475,10 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm
     vm_prot_t prot, boolean_t wired)
 {

-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot, wired);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }

@@ -1493,7 +1498,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	vm_page_t real;
 	u_long data;

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1636,14 +1641,14 @@ pmap_enter_object(pmap_t pm, vm_offset_t
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }

@@ -1651,11 +1656,11 @@ void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {

-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }

@@ -1721,7 +1726,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm

 	if (dst_addr != src_addr)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -1739,7 +1744,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }

@@ -1938,7 +1943,7 @@ pmap_page_exists_quick(pmap_t pm, vm_pag
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1949,7 +1954,7 @@ pmap_page_exists_quick(pmap_t pm, vm_pag
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }

@@ -1966,11 +1971,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
 			count++;
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (count);
 }

@@ -1997,13 +2002,13 @@ pmap_page_is_mapped(vm_page_t m)
 	rv = FALSE;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (rv);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & TD_PV) != 0) {
 			rv = TRUE;
 			break;
 		}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }

@@ -2029,7 +2034,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -2043,7 +2048,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (count);
 }

@@ -2066,7 +2071,7 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2075,7 +2080,7 @@ pmap_is_modified(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }

@@ -2109,7 +2114,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2118,7 +2123,7 @@ pmap_is_referenced(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }

@@ -2141,7 +2146,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2149,7 +2154,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }

 void
@@ -2160,7 +2165,7 @@ pmap_clear_reference(vm_page_t m)

 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2168,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }

 void
@@ -2189,7 +2194,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2200,7 +2205,7 @@ pmap_remove_write(vm_page_t m)
 		}
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }

 int

Modified: head/sys/sparc64/sparc64/tsb.c
==============================================================================
--- head/sys/sparc64/sparc64/tsb.c	Tue May 29 01:48:06 2012	(r236213)
+++ head/sys/sparc64/sparc64/tsb.c	Tue May 29 01:52:38 2012	(r236214)
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -131,7 +132,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm
 		PMAP_STATS_INC(tsb_nenter_u_oc);
 	}

-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	if (pm == kernel_pmap) {
 		PMAP_STATS_INC(tsb_nenter_k);
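
A minimal sketch (not code from this commit) of the locking protocol the
log message describes.  In this commit, every pmap function that touches
a TTE list takes the new lock exclusively, exactly where the page queues
lock was taken before:

	/* Step 1 (this commit): coarse, exclusive acquisition. */
	rw_wlock(&tte_list_global_lock);
	/* ... traverse or modify m->md.tte_list ... */
	rw_wunlock(&tte_list_global_lock);

Once finer-grained TTE list locks exist (the per-page tte_list_lock()
below is hypothetical, shown only to sketch the intent), the global
acquisition can be downgraded to read, letting such functions run
concurrently:

	/* Step 2 (future work): shared global lock plus per-list lock. */
	rw_rlock(&tte_list_global_lock);
	mtx_lock(tte_list_lock(m));	/* hypothetical per-list mutex */
	/* ... traverse or modify m->md.tte_list ... */
	mtx_unlock(tte_list_lock(m));
	rw_runlock(&tte_list_global_lock);

Because a shared holder excludes an exclusive one, any function not yet
converted can keep taking the global lock exclusively, which is what
lets this write-to-read migration proceed one function at a time.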