From: Alan Cox <alc@FreeBSD.org>
Date: Sat, 8 May 2010 20:34:01 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r207796 - in head/sys: amd64/amd64 arm/arm i386/i386 i386/xen ia64/ia64 kern mips/mips powerpc/aim powerpc/booke sparc64/sparc64 sun4v/sun4v sys vm

Author: alc
Date: Sat May  8 20:34:01 2010
New Revision: 207796
URL: http://svn.freebsd.org/changeset/base/207796

Log:
  Push down the page queues lock into vm_page_cache(), vm_page_try_to_cache(),
  and vm_page_try_to_free().  Consequently, push down the page queues lock
  into pmap_enter_quick(), pmap_page_wired_mappings(), pmap_remove_all(), and
  pmap_remove_write().

  Push down the page queues lock into Xen's pmap_page_is_mapped().  (I
  overlooked the Xen pmap in r207702.)

  Switch to a per-processor counter for the total number of pages cached.
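  The recurring pattern in this change is a lock push-down: each pmap
  entry point stops asserting that the caller holds the page queues
  lock and instead acquires and releases the lock itself.  A minimal
  sketch of the before/after shape, distilled from the hunks below
  (illustrative only; the real functions also walk pv lists and take
  per-pmap locks):

	/* Before: every caller had to wrap the call in the lock. */
	void
	pmap_remove_all(vm_page_t m)
	{
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
		/* ... remove every mapping of m ... */
	}

	/* After: the lock is private to the pmap layer. */
	void
	pmap_remove_all(vm_page_t m)
	{
		vm_page_lock_queues();
		/* ... remove every mapping of m ... */
		vm_page_unlock_queues();
	}

  Callers such as vm_page_cache() and vfs_vmio_release() consequently
  drop their vm_page_lock_queues()/vm_page_unlock_queues() pairs, as
  the kern/ and vm/ hunks below show.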
Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/kern/subr_uio.c
  head/sys/kern/vfs_bio.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sun4v/sun4v/pmap.c
  head/sys/sys/vmmeter.h
  head/sys/vm/swap_pager.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_pageout.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/amd64/amd64/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -2796,7 +2796,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
@@ -2834,6 +2834,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -3414,8 +3415,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
-	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3926,8 +3929,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
+	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
-	return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
+	count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+	vm_page_unlock_queues();
+	return (count);
 }
 
 /*
@@ -4237,7 +4243,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4268,6 +4274,7 @@ retry:
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/arm/arm/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -3118,18 +3118,11 @@ pmap_remove_all(vm_page_t m)
 	pmap_t curpm;
 	int flags = 0;
 
-#if defined(PMAP_DEBUG)
-	/*
-	 * XXX This makes pmap_remove_all() illegal for non-managed pages!
-	 */
-	if (m->flags & PG_FICTITIOUS) {
-		panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
-	}
-#endif
-
+	KASSERT((m->flags & PG_FICTITIOUS) == 0,
+	    ("pmap_remove_all: page %p is fictitious", m));
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	pmap_remove_write(m);
 	curpm = vmspace_pmap(curproc->p_vmspace);
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -3180,6 +3173,7 @@ pmap_remove_all(vm_page_t m)
 		pmap_tlb_flushD(curpm);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 
@@ -3615,9 +3609,11 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE, M_NOWAIT);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -4450,10 +4446,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 		if ((pv->pv_flags & PVF_WIRED) != 0)
 			count++;
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -4530,8 +4527,11 @@ void
 pmap_remove_write(vm_page_t m)
 {
 
-	if (m->flags & PG_WRITEABLE)
+	if (m->flags & PG_WRITEABLE) {
+		vm_page_lock_queues();
 		pmap_clearbit(m, PVF_WRITE);
+		vm_page_unlock_queues();
+	}
 }
 

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/i386/i386/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -2900,7 +2900,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
@@ -2940,6 +2940,7 @@ pmap_remove_all(vm_page_t m)
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -3544,8 +3545,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
-	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -4088,8 +4091,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
+	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
-	return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
+	count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+	vm_page_unlock_queues();
+	return (count);
 }
 
 /*
@@ -4404,10 +4410,10 @@ pmap_remove_write(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4445,6 +4451,7 @@ retry:
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/i386/xen/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -2485,16 +2485,9 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	vm_page_t free;
 
-#if defined(PMAP_DIAGNOSTIC)
-	/*
-	 * XXX This makes pmap_remove_all() illegal for non-managed pages!
-	 */
-	if (m->flags & PG_FICTITIOUS) {
-		panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx",
-		    VM_PAGE_TO_PHYS(m) & 0xffffffff);
-	}
-#endif
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & PG_FICTITIOUS) == 0,
+	    ("pmap_remove_all: page %p is fictitious", m));
+	vm_page_lock_queues();
 	sched_pin();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
@@ -2531,6 +2524,7 @@ pmap_remove_all(vm_page_t m)
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*
@@ -2946,10 +2940,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
 	CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
 	    pmap, va, m, prot);
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
-	(void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
+	(void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
 	if (count)
 		HYPERVISOR_multicall(&mcl, count);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3504,7 +3500,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	sched_pin();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -3515,6 +3511,7 @@ pmap_page_wired_mappings(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -3525,16 +3522,15 @@ pmap_page_wired_mappings(vm_page_t m)
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
+	boolean_t rv;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	vm_page_lock_queues();
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -3784,10 +3780,10 @@ pmap_remove_write(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t oldpte, *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	sched_pin();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -3818,6 +3814,7 @@ retry:
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
 	sched_unpin();
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/ia64/ia64/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1392,15 +1392,9 @@ pmap_remove_all(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-#if defined(DIAGNOSTIC)
-	/*
-	 * XXX This makes pmap_remove_all() illegal for non-managed pages!
-	 */
-	if (m->flags & PG_FICTITIOUS) {
-		panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
-	}
-#endif
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & PG_FICTITIOUS) == 0,
+	    ("pmap_remove_all: page %p is fictitious", m));
+	vm_page_lock_queues();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		struct ia64_lpte *pte;
 		pmap_t pmap = pv->pv_pmap;
@@ -1417,6 +1411,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1655,9 +1650,11 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
 {
 	pmap_t oldpmap;
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
 	pmap_enter_quick_locked(pmap, va, m, prot);
+	vm_page_unlock_queues();
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1875,7 +1872,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = pv->pv_pmap;
 		PMAP_LOCK(pmap);
@@ -1887,6 +1884,7 @@ pmap_page_wired_mappings(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pmap);
 	}
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -2118,10 +2116,10 @@ pmap_remove_write(vm_page_t m)
 	pv_entry_t pv;
 	vm_prot_t prot;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = pv->pv_pmap;
 		PMAP_LOCK(pmap);
@@ -2142,6 +2140,7 @@ pmap_remove_write(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/kern/subr_uio.c
==============================================================================
--- head/sys/kern/subr_uio.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/kern/subr_uio.c	Sat May  8 20:34:01 2010	(r207796)
@@ -105,7 +105,6 @@ retry:
 		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
 			goto retry;
 		vm_page_lock(user_pg);
-		vm_page_lock_queues();
 		pmap_remove_all(user_pg);
 		vm_page_free(user_pg);
 		vm_page_unlock(user_pg);
@@ -117,11 +116,9 @@ retry:
 		 */
 		if (uobject->backing_object != NULL)
 			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
-		vm_page_lock_queues();
 	}
 	vm_page_insert(kern_pg, uobject, upindex);
 	vm_page_dirty(kern_pg);
-	vm_page_unlock_queues();
 	VM_OBJECT_UNLOCK(uobject);
 	vm_map_lookup_done(map, entry);
 	return(KERN_SUCCESS);

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/kern/vfs_bio.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1579,7 +1579,6 @@ vfs_vmio_release(struct buf *bp)
 		 */
 		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
 		    m->wire_count == 0) {
-			vm_page_lock_queues();
 			/*
 			 * Might as well free the page if we can and it has
 			 * no valid data.  We also free the page if the
@@ -1593,7 +1592,6 @@ vfs_vmio_release(struct buf *bp)
 			} else if (buf_vm_page_count_severe()) {
 				vm_page_try_to_cache(m);
 			}
-			vm_page_unlock_queues();
 		}
 		vm_page_unlock(m);
 	}

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/mips/mips/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1595,7 +1595,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 
 	if (m->md.pv_flags & PV_TABLE_REF)
 		vm_page_flag_set(m, PG_REFERENCED);
@@ -1646,6 +1646,7 @@ pmap_remove_all(vm_page_t m)
 
 	vm_page_flag_clear(m, PG_WRITEABLE);
 	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1921,8 +1922,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -2510,10 +2513,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 		if (pv->pv_wired)
 			count++;
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -2527,12 +2531,14 @@ pmap_remove_write(vm_page_t m)
 	vm_offset_t va;
 	pt_entry_t *pte;
 
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->flags & PG_FICTITIOUS) != 0 ||
+	    (m->flags & PG_WRITEABLE) == 0)
 		return;
 
 	/*
 	 * Loop over all current mappings setting/clearing as appropos.
 	 */
+	vm_page_lock_queues();
 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
 		npv = TAILQ_NEXT(pv, pv_plist);
 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
@@ -2545,6 +2551,7 @@ pmap_remove_write(vm_page_t m)
 		    VM_PROT_READ | VM_PROT_EXECUTE);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/powerpc/aim/mmu_oea.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1208,11 +1208,12 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, v
     vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
-
 }
 
 vm_paddr_t
@@ -1322,10 +1323,10 @@ moea_remove_write(mmu_t mmu, vm_page_t m
 	pmap_t pmap;
 	u_int lo;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	lo = moea_attr_fetch(m);
 	powerpc_sync();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@@ -1351,6 +1352,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m
 		vm_page_dirty(m);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1518,10 +1520,11 @@ moea_page_wired_mappings(mmu_t mmu, vm_p
 	count = 0;
 	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -1732,8 +1735,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 	struct pvo_entry *pvo, *next_pvo;
 	pmap_t pmap;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	vm_page_lock_queues();
 	pvo_head = vm_page_to_pvoh(m);
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1749,6 +1751,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 		vm_page_dirty(m);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/powerpc/aim/mmu_oea64.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1341,11 +1341,13 @@ void
 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
+
+	vm_page_lock_queues();
 	PMAP_LOCK(pm);
 	moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
-
 }
 
 vm_paddr_t
@@ -1517,10 +1519,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 	pmap_t pmap;
 	uint64_t lo;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	lo = moea64_attr_fetch(m);
 	SYNC();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@@ -1547,6 +1549,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 		vm_page_dirty(m);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1710,10 +1713,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 	count = 0;
 	if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -1929,8 +1933,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 	struct pvo_entry *pvo, *next_pvo;
 	pmap_t pmap;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	vm_page_lock_queues();
 	pvo_head = vm_page_to_pvoh(m);
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1946,6 +1949,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 		vm_page_dirty(m);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/powerpc/booke/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1722,9 +1722,11 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t
     vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m,
 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1783,8 +1785,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_
 	pv_entry_t pv, pvn;
 	uint8_t hold_flag;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	vm_page_lock_queues();
 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
 		pvn = TAILQ_NEXT(pv, pv_link);
 
@@ -1794,6 +1795,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -1939,11 +1941,10 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
 	pv_entry_t pv;
 	pte_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
-
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
@@ -1967,6 +1968,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 static void
@@ -2388,8 +2390,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu,
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
@@ -2397,7 +2398,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu,
 			count++;
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-
+	vm_page_unlock_queues();
 	return (count);
 }
 

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/sparc64/sparc64/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1240,7 +1240,7 @@ pmap_remove_all(vm_page_t m)
 	struct tte *tp;
 	vm_offset_t va;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
@@ -1263,6 +1263,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pm);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 int
@@ -1502,9 +1503,11 @@ void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
+	vm_page_lock_queues();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
 }
 
@@ -1809,10 +1812,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
 			count++;
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -1981,10 +1985,10 @@ pmap_remove_write(vm_page_t m)
 	struct tte *tp;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1995,6 +1999,7 @@ pmap_remove_write(vm_page_t m)
 		}
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 int

Modified: head/sys/sun4v/sun4v/pmap.c
==============================================================================
--- head/sys/sun4v/sun4v/pmap.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/sun4v/sun4v/pmap.c	Sat May  8 20:34:01 2010	(r207796)
@@ -1211,8 +1211,11 @@ pmap_enter_object(pmap_t pmap, vm_offset
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+
+	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	pmap_enter_quick_locked(pmap, va, m, prot);
+	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1714,7 +1717,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = pv->pv_pmap;
 		PMAP_LOCK(pmap);
@@ -1723,6 +1726,7 @@ pmap_page_wired_mappings(vm_page_t m)
 			count++;
 		PMAP_UNLOCK(pmap);
 	}
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -1732,12 +1736,15 @@ pmap_page_wired_mappings(vm_page_t m)
 void
 pmap_remove_write(vm_page_t m)
 {
+
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	tte_clear_phys_bit(m, VTD_SW_W|VTD_W);
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
+
 /*
  * Initialize the pmap associated with process 0.
  */
@@ -1956,7 +1963,7 @@ pmap_remove_all(vm_page_t m)
 	uint64_t tte_data;
 
 	DPRINTF("pmap_remove_all 0x%lx\n", VM_PAGE_TO_PHYS(m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		PMAP_LOCK(pv->pv_pmap);
 		pv->pv_pmap->pm_stats.resident_count--;
@@ -1986,6 +1993,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pv);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_unlock_queues();
 }
 
 static void

Modified: head/sys/sys/vmmeter.h
==============================================================================
--- head/sys/sys/vmmeter.h	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/sys/vmmeter.h	Sat May  8 20:34:01 2010	(r207796)
@@ -72,7 +72,7 @@ struct vmmeter {
 	u_int v_pdwakeups;	/* (f) times daemon has awaken from sleep */
 	u_int v_pdpages;	/* (q) pages analyzed by daemon */
 
-	u_int v_tcached;	/* (q) total pages cached */
+	u_int v_tcached;	/* (p) total pages cached */
 	u_int v_dfree;		/* (q) pages freed by daemon */
 	u_int v_pfree;		/* (p) pages freed by exiting processes */
 	u_int v_tfree;		/* (p) total pages freed */

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/vm/swap_pager.c	Sat May  8 20:34:01 2010	(r207796)
@@ -382,8 +382,10 @@ static void
 swp_pager_free_nrpage(vm_page_t m)
 {
 
+	vm_page_lock(m);
 	if (m->wire_count == 0)
 		vm_page_free(m);
+	vm_page_unlock(m);
 }
 
 /*
@@ -1137,17 +1139,10 @@ swap_pager_getpages(vm_object_t object,
 	if (0 < i || j < count) {
 		int k;
-
-		for (k = 0; k < i; ++k) {
-			vm_page_lock(m[k]);
+		for (k = 0; k < i; ++k)
 			swp_pager_free_nrpage(m[k]);
-			vm_page_unlock(m[k]);
-		}
-		for (k = j; k < count; ++k) {
-			vm_page_lock(m[k]);
+		for (k = j; k < count; ++k)
 			swp_pager_free_nrpage(m[k]);
-			vm_page_unlock(m[k]);
-		}
 	}
 
 	/*
@@ -1514,8 +1509,6 @@ swp_pager_async_iodone(struct buf *bp)
 	for (i = 0; i < bp->b_npages; ++i) {
 		vm_page_t m = bp->b_pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
 		m->oflags &= ~VPO_SWAPINPROG;
 
 		if (bp->b_ioflags & BIO_ERROR) {
@@ -1558,7 +1551,9 @@ swp_pager_async_iodone(struct buf *bp)
 				 * then finish the I/O.
 				 */
 				vm_page_dirty(m);
+				vm_page_lock(m);
 				vm_page_activate(m);
+				vm_page_unlock(m);
 				vm_page_io_finish(m);
 			}
 		} else if (bp->b_iocmd == BIO_READ) {
@@ -1593,11 +1588,12 @@ swp_pager_async_iodone(struct buf *bp)
 			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
+				vm_page_lock(m);
 				vm_page_deactivate(m);
+				vm_page_unlock(m);
 				vm_page_wakeup(m);
-			} else {
+			} else
 				vm_page_flash(m);
-			}
 		} else {
 			/*
 			 * For write success, clear the dirty
@@ -1609,11 +1605,12 @@ swp_pager_async_iodone(struct buf *bp)
 			    " protected", m));
 			vm_page_undirty(m);
 			vm_page_io_finish(m);
-			if (vm_page_count_severe())
+			if (vm_page_count_severe()) {
+				vm_page_lock(m);
 				vm_page_try_to_cache(m);
+				vm_page_unlock(m);
+			}
 		}
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 
 	/*

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/vm/vm_fault.c	Sat May  8 20:34:01 2010	(r207796)
@@ -487,20 +487,16 @@ readrest:
 				    (mt->oflags & VPO_BUSY))
 					continue;
 				vm_page_lock(mt);
-				vm_page_lock_queues();
 				if (mt->hold_count ||
 				    mt->wire_count) {
-					vm_page_unlock_queues();
 					vm_page_unlock(mt);
 					continue;
 				}
 				pmap_remove_all(mt);
-				if (mt->dirty) {
+				if (mt->dirty != 0)
 					vm_page_deactivate(mt);
-				} else {
+				else
 					vm_page_cache(mt);
-				}
-				vm_page_unlock_queues();
 				vm_page_unlock(mt);
 			}
 			ahead += behind;
@@ -1025,13 +1021,8 @@ vm_fault_prefault(pmap_t pmap, vm_offset
 			break;
 		}
 		if (m->valid == VM_PAGE_BITS_ALL &&
-		    (m->flags & PG_FICTITIOUS) == 0) {
-			vm_page_lock(m);
-			vm_page_lock_queues();
+		    (m->flags & PG_FICTITIOUS) == 0)
 			pmap_enter_quick(pmap, addr, m, entry->protection);
-			vm_page_unlock_queues();
-			vm_page_unlock(m);
-		}
 		VM_OBJECT_UNLOCK(lobject);
 	}
 }

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Sat May  8 20:08:01 2010	(r207795)
+++ head/sys/vm/vm_object.c	Sat May  8 20:34:01 2010	(r207796)
@@ -876,13 +876,8 @@ vm_object_page_clean(vm_object_t object,
 			p->oflags |= VPO_CLEANCHK;
 			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
 				clearobjflags = 0;
-			else {
-				vm_page_lock(p);
-				vm_page_lock_queues();
+			else
 				pmap_remove_write(p);
-				vm_page_unlock_queues();
-				vm_page_unlock(p);
-			}
 		}
 
 		if (clearobjflags && (tstart == 0) && (tend == object->size))
@@ -1048,11 +1043,7 @@ vm_object_page_collect_flush(vm_object_t
 	vm_pageout_flush(ma, runlen, pagerflags);
 	for (i = 0; i < runlen; i++) {
 		if (ma[i]->dirty) {
-			vm_page_lock(ma[i]);
-			vm_page_lock_queues();
 			pmap_remove_write(ma[i]);
-			vm_page_unlock_queues();
-			vm_page_unlock(ma[i]);
 			ma[i]->oflags |= VPO_CLEANCHK;
 
 			/*
@@ -1968,7 +1959,6 @@ again:
 			 * if "clean_only" is FALSE.
 			 */
 			vm_page_lock(p);
-			vm_page_lock_queues();
 			if ((wirings = p->wire_count) != 0 &&
 			    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
 				/* Fictitious pages do not have managed mappings. */
@@ -1980,7 +1970,6 @@ again:
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
-			vm_page_unlock_queues();
 			vm_page_unlock(p);
 			continue;
 		}
@@ -1991,7 +1980,6 @@ again:

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
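  The vm_page.c and vm_pageout.c hunks, including the one that actually
  converts the v_tcached counter, fall past the truncation point.  The
  annotation change in vmmeter.h from (q) to (p) implies that the counter
  is no longer serialized by the page queues lock but updated per CPU.
  A plausible sketch of that conversion, assuming the standard PCPU_INC()
  idiom used for the other (p)-annotated fields of struct vmmeter (the
  exact truncated hunk is not shown here):

	/* Before: increment serialized by the page queues lock. */
	cnt.v_tcached++;

	/*
	 * After: lock-free increment of the current CPU's private
	 * counter; readers sum the per-CPU values when the statistic
	 * is queried.
	 */
	PCPU_INC(cnt.v_tcached);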