From owner-svn-src-all@FreeBSD.ORG  Mon Jul  2 07:01:42 2012
Return-Path:
Delivered-To: svn-src-all@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [69.147.83.52])
	by hub.freebsd.org (Postfix) with ESMTP id 87F74106566C;
	Mon, 2 Jul 2012 07:01:42 +0000 (UTC) (envelope-from alc@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id 71EA78FC0C;
	Mon, 2 Jul 2012 07:01:42 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id q6271gQ9052252;
	Mon, 2 Jul 2012 07:01:42 GMT (envelope-from alc@svn.freebsd.org)
Received: (from alc@localhost)
	by svn.freebsd.org (8.14.4/8.14.4/Submit) id q6271g5E052249;
	Mon, 2 Jul 2012 07:01:42 GMT (envelope-from alc@svn.freebsd.org)
Message-Id: <201207020701.q6271g5E052249@svn.freebsd.org>
From: Alan Cox
Date: Mon, 2 Jul 2012 07:01:42 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
X-SVN-Group: stable-9
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r237966 - in stable/9/sys: amd64/amd64 i386/i386
X-BeenThere: svn-src-all@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: "SVN commit messages for the entire src tree (except for \"user\" and \"projects\")"
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Mon, 02 Jul 2012 07:01:42 -0000

Author: alc
Date: Mon Jul  2 07:01:41 2012
New Revision: 237966
URL: http://svn.freebsd.org/changeset/base/237966

Log:
  MFC r235695, r236158, r236190, r236494

  Replace all uses of the vm page queues lock by an r/w lock that is
  private to this pmap.c.

Modified:
  stable/9/sys/amd64/amd64/pmap.c
  stable/9/sys/i386/i386/pmap.c

Directory Properties:
  stable/9/sys/   (props changed)

Modified: stable/9/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/9/sys/amd64/amd64/pmap.c	Mon Jul  2 06:58:10 2012	(r237965)
+++ stable/9/sys/amd64/amd64/pmap.c	Mon Jul  2 07:01:41 2012	(r237966)
@@ -117,6 +117,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -199,6 +200,17 @@ static u_int64_t DMPDphys;	/* phys addr
 static u_int64_t DMPDPphys;	/* phys addr of direct mapped level 3 */
 
 /*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+	struct rwlock	lock;
+	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define	pvh_global_lock	pvh_global.lock
+
+/*
  * Data for the pv entry allocation mechanism
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
@@ -582,6 +594,11 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pvh global");
+
 	/*
 	 * Reserve some special page table entries/VA space for temporary
 	 * mapping of pages.
@@ -1652,9 +1669,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (flags & M_WAITOK) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
-			vm_page_lock_queues();
+			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
@@ -2056,7 +2073,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 	uint64_t inuse, freemask;
 	int bit, field, freed;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	pmap = NULL;
 	free = m_pc = NULL;
@@ -2171,7 +2188,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
 	struct pv_chunk *pc;
 	int idx, field, bit;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PV_STAT(pv_entry_frees++);
 	PV_STAT(pv_entry_spare++);
@@ -2222,8 +2239,8 @@ get_pv_entry(pmap_t pmap, boolean_t try)
 	struct pv_chunk *pc;
 	vm_page_t m;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PV_STAT(pv_entry_allocs++);
retry:
 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
@@ -2292,7 +2309,7 @@ pmap_pvh_remove(struct md_page *pvh, pma
 {
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
@@ -2315,7 +2332,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
@@ -2353,7 +2370,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offs
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
@@ -2399,7 +2416,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 {
 	struct md_page *pvh;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -2417,8 +2434,8 @@ pmap_insert_entry(pmap_t pmap, vm_offset
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2432,8 +2449,8 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2451,7 +2468,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offse
 	struct md_page *pvh;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
 		pvh = pa_to_pvh(pa);
@@ -2719,7 +2736,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 
 	anyvalid = 0;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 
 	/*
@@ -2829,7 +2846,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
out:
 	if (anyvalid)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -2861,7 +2878,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -2900,7 +2917,7 @@ small_mappings:
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	pmap_free_zero_pages(free);
 }
@@ -3022,12 +3039,12 @@ resume:
 		} else {
 			if (!pv_lists_locked) {
 				pv_lists_locked = TRUE;
-				if (!mtx_trylock(&vm_page_queue_mtx)) {
+				if (!rw_try_wlock(&pvh_global_lock)) {
 					if (anychanged)
 						pmap_invalidate_all(
 						    pmap);
 					PMAP_UNLOCK(pmap);
-					vm_page_lock_queues();
+					rw_wlock(&pvh_global_lock);
 					goto resume;
 				}
 			}
@@ -3078,7 +3095,7 @@ retry:
 	if (anychanged)
 		pmap_invalidate_all(pmap);
 	if (pv_lists_locked)
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3233,7 +3250,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 
 	mpte = NULL;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 
 	/*
@@ -3389,7 +3406,7 @@ validate:
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
 
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3405,7 +3422,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
 	pd_entry_t *pde, newpde;
 	vm_page_t free, mpde;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
@@ -3486,7 +3503,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		va = start + ptoa(diff);
@@ -3500,7 +3517,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 		    mpte);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3517,10 +3534,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3535,7 +3552,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -3754,9 +3771,9 @@ retry:
 	if (!wired != ((*pde & PG_W) == 0)) {
 		if (!are_queues_locked) {
 			are_queues_locked = TRUE;
-			if (!mtx_trylock(&vm_page_queue_mtx)) {
+			if (!rw_try_wlock(&pvh_global_lock)) {
 				PMAP_UNLOCK(pmap);
-				vm_page_lock_queues();
+				rw_wlock(&pvh_global_lock);
 				goto retry;
 			}
 		}
@@ -3775,7 +3792,7 @@ retry:
 	}
out:
 	if (are_queues_locked)
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3799,7 +3816,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 
 	if (dst_addr != src_addr)
 		return;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -3915,7 +3932,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 		}
 	}
out:
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }
@@ -3996,7 +4013,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
 			rv = TRUE;
@@ -4018,7 +4035,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4036,13 +4053,13 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	count = pmap_pvh_wired_mappings(&m->md, count);
 	if ((m->flags & PG_FICTITIOUS) == 0) {
 		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 		    count);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
@@ -4058,7 +4075,7 @@ pmap_pvh_wired_mappings(struct md_page *
 	pt_entry_t *pte;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4081,11 +4098,11 @@ pmap_page_is_mapped(vm_page_t m)
 
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4116,7 +4133,7 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 		allfree = 1;
@@ -4220,7 +4237,7 @@ pmap_remove_pages(pmap_t pmap)
 		}
 	}
 	pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -4248,11 +4265,11 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4269,7 +4286,7 @@ pmap_is_modified_pvh(struct md_page *pvh
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4320,11 +4337,11 @@ pmap_is_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_referenced_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4340,7 +4357,7 @@ pmap_is_referenced_pvh(struct md_page *p
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4379,7 +4396,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4413,7 +4430,7 @@ retry:
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -4441,7 +4458,7 @@ pmap_ts_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4499,7 +4516,7 @@ small_mappings:
 		} while ((pv = pvn) != NULL && pv != pvf);
 	}
out:
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rtval);
 }
@@ -4529,7 +4546,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4578,7 +4595,7 @@ small_mappings:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -4598,7 +4615,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4638,7 +4655,7 @@ small_mappings:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*

Modified: stable/9/sys/i386/i386/pmap.c
==============================================================================
--- stable/9/sys/i386/i386/pmap.c	Mon Jul  2 06:58:10 2012	(r237965)
+++ stable/9/sys/i386/i386/pmap.c	Mon Jul  2 07:01:41 2012	(r237966)
@@ -118,6 +118,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -231,6 +232,17 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_ena
 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
 
 /*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+	struct rwlock	lock;
+	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define	pvh_global_lock	pvh_global.lock
+
+/*
  * Data for the pv entry allocation mechanism
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
@@ -393,6 +405,12 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 	kernel_pmap->pm_root = NULL;
 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pvh global");
+
 	LIST_INIT(&allpmaps);
 
 	/*
@@ -1277,7 +1295,7 @@ invlcaddr(void *caddr)
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
- * If the given pmap is not the current pmap, vm_page_queue_mtx
+ * If the given pmap is not the current pmap, pvh_global_lock
 * must be held and curthread pinned to a CPU.
 */
static pt_entry_t *
@@ -1293,7 +1311,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t
 	/* are we current address space or kernel? */
 	if (pmap_is_current(pmap))
 		return (vtopte(va));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	newpf = *pde & PG_FRAME;
 	if ((*PMAP1 & PG_FRAME) != newpf) {
@@ -1842,9 +1860,9 @@ _pmap_allocpte(pmap_t pmap, u_int ptepin
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (flags & M_WAITOK) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
-			vm_page_lock_queues();
+			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
@@ -2339,7 +2357,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
 	struct pv_chunk *pc;
 	int idx, field, bit;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PV_STAT(pv_entry_frees++);
 	PV_STAT(pv_entry_spare++);
@@ -2390,8 +2408,8 @@ get_pv_entry(pmap_t pmap, boolean_t try)
 	struct pv_chunk *pc;
 	vm_page_t m;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PV_STAT(pv_entry_allocs++);
 	pv_entry_count++;
 	if (pv_entry_count > pv_entry_high_water)
@@ -2428,8 +2446,8 @@ retry:
 		}
 	}
 	/*
-	 * Access to the ptelist "pv_vafree" is synchronized by the page
-	 * queues lock.  If "pv_vafree" is currently non-empty, it will
+	 * Access to the ptelist "pv_vafree" is synchronized by the pvh
+	 * global lock.  If "pv_vafree" is currently non-empty, it will
 	 * remain non-empty until pmap_ptelist_alloc() completes.
 	 */
 	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
@@ -2463,7 +2481,7 @@ pmap_pvh_remove(struct md_page *pvh, pma
 {
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
@@ -2481,7 +2499,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
@@ -2514,7 +2532,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offs
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
@@ -2555,7 +2573,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 {
 	struct md_page *pvh;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -2573,8 +2591,8 @@ pmap_insert_entry(pmap_t pmap, vm_offset
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2588,8 +2606,8 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2608,7 +2626,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offse
 	struct md_page *pvh;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2686,7 +2704,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t
 	 */
 	if (va >= KERNBASE)
 		firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
-	else if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
+	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
 		if ((*PMAP1 & PG_FRAME) != mptepa) {
 			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 #ifdef SMP
@@ -2845,7 +2863,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 	pt_entry_t oldpte;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	oldpte = pte_load_clear(ptq);
 	if (oldpte & PG_W)
@@ -2876,7 +2894,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_
 {
 	pt_entry_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
@@ -2908,7 +2926,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 
 	anyvalid = 0;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	PMAP_LOCK(pmap);
@@ -2997,7 +3015,7 @@ out:
 	sched_unpin();
 	if (anyvalid)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -3029,7 +3047,7 @@ pmap_remove_all(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -3070,7 +3088,7 @@ small_mappings:
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	pmap_free_zero_pages(free);
 }
@@ -3146,7 +3164,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 	else {
 		pv_lists_locked = TRUE;
resume:
-		vm_page_lock_queues();
+		rw_wlock(&pvh_global_lock);
 		sched_pin();
 	}
 	anychanged = FALSE;
@@ -3190,7 +3208,7 @@ resume:
 		} else {
 			if (!pv_lists_locked) {
 				pv_lists_locked = TRUE;
-				if (!mtx_trylock(&vm_page_queue_mtx)) {
+				if (!rw_try_wlock(&pvh_global_lock)) {
 					if (anychanged)
 						pmap_invalidate_all(
 						    pmap);
@@ -3259,7 +3277,7 @@ retry:
 		pmap_invalidate_all(pmap);
 	if (pv_lists_locked) {
 		sched_unpin();
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -3430,7 +3448,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 
 	mpte = NULL;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	sched_pin();
@@ -3600,7 +3618,7 @@ validate:
 		pmap_promote_pde(pmap, pde, va);
 
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3615,7 +3633,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
 {
 	pd_entry_t *pde, newpde;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	pde = pmap_pde(pmap, va);
 	if (*pde != 0) {
@@ -3684,7 +3702,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		va = start + ptoa(diff);
@@ -3698,7 +3716,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 		    mpte);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3715,10 +3733,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3733,7 +3751,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -3939,9 +3957,9 @@ retry:
 	if (!wired != ((*pde & PG_W) == 0)) {
 		if (!are_queues_locked) {
 			are_queues_locked = TRUE;
-			if (!mtx_trylock(&vm_page_queue_mtx)) {
+			if (!rw_try_wlock(&pvh_global_lock)) {
 				PMAP_UNLOCK(pmap);
-				vm_page_lock_queues();
+				rw_wlock(&pvh_global_lock);
 				goto retry;
 			}
 		}
@@ -3965,7 +3983,7 @@ retry:
 	pmap_pte_release(pte);
out:
 	if (are_queues_locked)
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3994,7 +4012,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 	if (!pmap_is_current(src_pmap))
 		return;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -4084,7 +4102,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 	}
out:
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }
@@ -4226,7 +4244,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
 			rv = TRUE;
@@ -4248,7 +4266,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4266,13 +4284,13 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	count = pmap_pvh_wired_mappings(&m->md, count);
 	if ((m->flags & PG_FICTITIOUS) == 0) {
 		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 		    count);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
@@ -4288,7 +4306,7 @@ pmap_pvh_wired_mappings(struct md_page *
 	pt_entry_t *pte;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4313,11 +4331,11 @@ pmap_page_is_mapped(vm_page_t m)
 
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4347,7 +4365,7 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	sched_pin();
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -4451,7 +4469,7 @@ pmap_remove_pages(pmap_t pmap)
 	}
 	sched_unpin();
 	pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -4479,11 +4497,11 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4500,7 +4518,7 @@ pmap_is_modified_pvh(struct md_page *pvh
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
@@ -4553,11 +4571,11 @@ pmap_is_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_referenced_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -4573,7 +4591,7 @@ pmap_is_referenced_pvh(struct md_page *p
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
@@ -4614,7 +4632,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -4655,7 +4673,7 @@ retry:
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
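
Two patterns in this change are worth a note. First, the pvh_global wrapper
added to both files is a false-sharing guard: the rwlock is embedded in a
structure padded out to a full cache line and aligned on a cache-line
boundary, so the heavily contended lock word never shares a line with
neighboring globals. Below is a minimal userspace sketch of the same idea
using POSIX threads rather than the kernel's rwlock(9) KPI; the 64-byte line
size, the __attribute__ spelling, and the names are illustrative assumptions,
not part of the commit.

	/* cc -pthread padded_lock.c */
	#include <pthread.h>

	/* Assumed line size; the kernel takes CACHE_LINE_SIZE from machine/param.h. */
	#define	CACHE_LINE_SIZE	64

	static struct {
		pthread_rwlock_t lock;
		/* Pad to a whole number of lines so neighbors cannot share one. */
		char padding[CACHE_LINE_SIZE -
		    sizeof(pthread_rwlock_t) % CACHE_LINE_SIZE];
	} pvh_global __attribute__((aligned(CACHE_LINE_SIZE)));

	#define	pvh_global_lock	pvh_global.lock

	int
	main(void)
	{
		pthread_rwlock_init(&pvh_global_lock, NULL);	/* kernel: rw_init() */
		pthread_rwlock_wrlock(&pvh_global_lock);	/* kernel: rw_wlock() */
		pthread_rwlock_unlock(&pvh_global_lock);	/* kernel: rw_wunlock() */
		pthread_rwlock_destroy(&pvh_global_lock);
		return (0);
	}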
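
Second, the trylock-and-retry dance in pmap_protect() and
pmap_change_wiring(): the lock order puts pvh_global_lock ahead of the
per-pmap lock, so a thread that already holds the pmap lock may only
rw_try_wlock() the global lock. If that fails, it must drop the pmap lock,
block in the correct order, retake the pmap lock, and rescan, which is what
the goto resume / goto retry paths above implement. Here is a compilable
pthreads sketch of the same shape; update_mappings() and both lock objects
are hypothetical stand-ins, not kernel code.

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_rwlock_t pvh_global = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t pmap_mtx = PTHREAD_MUTEX_INITIALIZER;

	void
	update_mappings(void)
	{
		bool pv_lists_locked = false;

		pthread_mutex_lock(&pmap_mtx);
	resume:
		/* ... scan under the pmap lock; many passes never need more ... */
		if (!pv_lists_locked) {
			pv_lists_locked = true;
			/* kernel: rw_try_wlock() */
			if (pthread_rwlock_trywrlock(&pvh_global) != 0) {
				/*
				 * Blocking here would invert the lock order.
				 * Back out, block in the documented order, and
				 * rescan: the state may have changed while the
				 * pmap lock was dropped.
				 */
				pthread_mutex_unlock(&pmap_mtx);
				pthread_rwlock_wrlock(&pvh_global);
				pthread_mutex_lock(&pmap_mtx);
				goto resume;
			}
		}
		/* ... both locks held: the pv lists may be modified ... */
		if (pv_lists_locked)
			pthread_rwlock_unlock(&pvh_global);
		pthread_mutex_unlock(&pmap_mtx);
	}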