Date: Tue, 9 Aug 2011 21:01:37 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r224746 - in head/sys: amd64/amd64 arm/arm i386/i386 i386/xen ia64/ia64 mips/mips powerpc/aim powerpc/booke powerpc/include sparc64/sparc64 vm
Message-ID: <201108092101.p79L1b2R011923@svn.freebsd.org>
Author: kib
Date: Tue Aug  9 21:01:36 2011
New Revision: 224746
URL: http://svn.freebsd.org/changeset/base/224746

Log:
  - Move the PG_UNMANAGED flag from m->flags to m->oflags, renaming the
    flag to VPO_UNMANAGED (and also making the flag protected by the vm
    object lock, instead of vm page queue lock).
  - Mark the fake pages with both PG_FICTITIOUS (as it is now) and
    VPO_UNMANAGED. As a consequence, pmap code now can use just
    VPO_UNMANAGED to decide whether the page is unmanaged.

  Reviewed by:	alc
  Tested by:	pho (x86, previous version), marius (sparc64),
  	marcel (arm, ia64, powerpc), ray (mips)
  Sponsored by:	The FreeBSD Foundation
  Approved by:	re (bz)

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/include/pmap.h
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sparc64/sparc64/tsb.c
  head/sys/vm/vm_kern.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/amd64/amd64/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -2320,7 +2320,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
 		m++;
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("pmap_pv_demote_pde: page %p is not managed", m));
 		va += PAGE_SIZE;
 		pmap_insert_entry(pmap, va, m);
@@ -2847,7 +2847,7 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
@@ -3194,8 +3194,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+	    VM_OBJECT_LOCKED(m->object),
 	    ("pmap_enter: page %p is not busy", m));
 
 	mpte = NULL;
@@ -3276,7 +3276,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -3389,7 +3389,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
 	}
 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 	    PG_PS | PG_V;
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
 
 		/*
@@ -3498,7 +3498,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	vm_paddr_t pa;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3556,7 +3556,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
 			free = NULL;
@@ -3581,7 +3581,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -3958,7 +3958,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -3999,7 +3999,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4041,7 +4041,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
 	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4199,7 +4199,7 @@ pmap_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
@@ -4280,7 +4280,7 @@ pmap_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4328,7 +4328,7 @@ pmap_remove_write(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -4397,7 +4397,7 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
@@ -4471,7 +4471,7 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4548,7 +4548,7 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/arm/arm/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -3120,7 +3120,7 @@ pmap_remove_all(vm_page_t m)
 	pmap_t curpm;
 	int flags = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
@@ -3242,7 +3242,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 		PTE_SYNC(ptep);
 
 		if (pg != NULL) {
-			if (!(pg->flags & PG_UNMANAGED)) {
+			if (!(pg->oflags & VPO_UNMANAGED)) {
 				f = pmap_modify_pv(pg, pm, sva,
 				    PVF_WRITE, 0);
 				vm_page_dirty(pg);
@@ -3327,8 +3327,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		pa = systempage.pv_pa;
 		m = NULL;
 	} else {
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-		    (m->oflags & VPO_BUSY) != 0 || (flags & M_NOWAIT) != 0,
+		KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+		    (flags & M_NOWAIT) != 0,
 		    ("pmap_enter_locked: page %p is not busy", m));
 		pa = VM_PAGE_TO_PHYS(m);
 	}
@@ -3417,7 +3417,7 @@ do_l2b_alloc:
 	if (prot & VM_PROT_WRITE) {
 		npte |= L2_S_PROT_W;
 		if (m != NULL &&
-		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_flag_set(m, PG_WRITEABLE);
 	}
 	npte |= pte_l2_s_cache_mode;
@@ -3480,36 +3480,36 @@ do_l2b_alloc:
 			 * this physical page is not/is already mapped.
 			 */
-			if (m && ((m->flags & PG_FICTITIOUS) ||
-			    ((m->flags & PG_UNMANAGED) &&
+			if (m && (m->oflags & VPO_UNMANAGED) &&
 			    !m->md.pv_kva &&
-			    TAILQ_EMPTY(&m->md.pv_list)))) {
+			    TAILQ_EMPTY(&m->md.pv_list)) {
 				pmap_free_pv_entry(pve);
 				pve = NULL;
 			}
-		} else if (m && !(m->flags & PG_FICTITIOUS) &&
-		    (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
+		} else if (m &&
+		    (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
 		    !TAILQ_EMPTY(&m->md.pv_list)))
 			pve = pmap_get_pv_entry();
-	} else if (m && !(m->flags & PG_FICTITIOUS) &&
-	    (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
+	} else if (m &&
+	    (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
 	    !TAILQ_EMPTY(&m->md.pv_list)))
 		pve = pmap_get_pv_entry();
-	if (m && !(m->flags & PG_FICTITIOUS)) {
-		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-		    ("pmap_enter: managed mapping within the clean submap"));
-		if (m->flags & PG_UNMANAGED) {
+	if (m) {
+		if ((m->oflags & VPO_UNMANAGED)) {
 			if (!TAILQ_EMPTY(&m->md.pv_list) ||
-			    m->md.pv_kva) {
+			    m->md.pv_kva) {
 				KASSERT(pve != NULL, ("No pv"));
 				nflags |= PVF_UNMAN;
 				pmap_enter_pv(m, pve, pmap, va, nflags);
 			} else
 				m->md.pv_kva = va;
 		} else {
-			KASSERT(pve != NULL, ("No pv"));
-			pmap_enter_pv(m, pve, pmap, va, nflags);
+			KASSERT(va < kmi.clean_sva ||
+			    va >= kmi.clean_eva,
+			    ("pmap_enter: managed mapping within the clean submap"));
+			KASSERT(pve != NULL, ("No pv"));
+			pmap_enter_pv(m, pve, pmap, va, nflags);
 		}
 	}
 }
@@ -4423,7 +4423,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -4453,7 +4453,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
@@ -4472,7 +4472,7 @@ int
 pmap_ts_referenced(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	return (pmap_clearbit(m, PVF_REF));
 }
@@ -4482,7 +4482,7 @@ boolean_t
 pmap_is_modified(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 	if (m->md.pvh_attrs & PVF_MOD)
 		return (TRUE);
@@ -4498,7 +4498,7 @@ void
 pmap_clear_modify(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4526,7 +4526,7 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	return ((m->md.pvh_attrs & PVF_REF) != 0);
 }
@@ -4540,7 +4540,7 @@ void
 pmap_clear_reference(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	if (m->md.pvh_attrs & PVF_REF)
 		pmap_clearbit(m, PVF_REF);
@@ -4554,7 +4554,7 @@ void
 pmap_remove_write(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/i386/i386/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -2400,7 +2400,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
 		m++;
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("pmap_pv_demote_pde: page %p is not managed", m));
 		va += PAGE_SIZE;
 		pmap_insert_entry(pmap, va, m);
@@ -2927,7 +2927,7 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
@@ -3299,8 +3299,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+	    VM_OBJECT_LOCKED(m->object),
 	    ("pmap_enter: page %p is not busy", m));
 
 	mpte = NULL;
@@ -3388,7 +3388,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -3498,7 +3498,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
 	}
 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 	    PG_PS | PG_V;
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
 
 		/*
@@ -3604,7 +3604,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	vm_page_t free;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3667,7 +3667,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
 			free = NULL;
@@ -3695,7 +3695,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -4096,7 +4096,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -4137,7 +4137,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4181,7 +4181,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
 	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4341,7 +4341,7 @@ pmap_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
@@ -4424,7 +4424,7 @@ pmap_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4474,7 +4474,7 @@ pmap_remove_write(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -4550,7 +4550,7 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
@@ -4626,7 +4626,7 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4715,7 +4715,7 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/i386/xen/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -2430,7 +2430,7 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	vm_page_t free;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
@@ -2616,8 +2616,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
 	    ("pmap_enter: page %p is not busy", m));
 
 	mpte = NULL;
@@ -2715,7 +2714,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -2915,7 +2914,7 @@ pmap_enter_quick_locked(multicall_entry_
 	multicall_entry_t *mcl = *mclpp;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -2979,7 +2978,7 @@ pmap_enter_quick_locked(multicall_entry_
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
 			free = NULL;
@@ -3008,7 +3007,7 @@ pmap_enter_quick_locked(multicall_entry_
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -3016,7 +3015,7 @@ pmap_enter_quick_locked(multicall_entry_
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		pa = xpmap_ptom(pa | PG_V | PG_U);
 	else
 		pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);
@@ -3403,7 +3402,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -3435,7 +3434,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	sched_pin();
@@ -3461,7 +3460,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
 	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -3600,7 +3599,7 @@ pmap_is_modified(vm_page_t m)
 	pmap_t pmap;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
 
@@ -3671,7 +3670,7 @@ pmap_is_referenced(vm_page_t m)
 	pmap_t pmap;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -3732,7 +3731,7 @@ pmap_remove_write(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t oldpte, *pte;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -3798,7 +3797,7 @@ pmap_ts_referenced(vm_page_t m)
 	pt_entry_t *pte;
 	int rtval = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	sched_pin();
@@ -3840,7 +3839,7 @@ pmap_clear_modify(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t *pte;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -3886,7 +3885,7 @@ pmap_clear_reference(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t *pte;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/ia64/ia64/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -1442,7 +1442,7 @@ pmap_remove_all(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -1548,8 +1548,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
 	    ("pmap_enter: page %p is not busy", m));
 
 	/*
@@ -1619,7 +1618,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		pmap_insert_entry(pmap, va, m);
@@ -1720,7 +1719,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	boolean_t managed;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1730,7 +1729,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	if (!pmap_present(pte)) {
 		/* Enter on the PV list if the page is managed. */
-		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+		if ((m->oflags & VPO_UNMANAGED) == 0) {
 			if (!pmap_try_insert_pv_entry(pmap, va, m)) {
 				pmap_free_pte(pte, va);
 				return;
@@ -1900,7 +1899,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -1932,7 +1931,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2010,7 +2009,7 @@ pmap_ts_referenced(vm_page_t m)
 	pv_entry_t pv;
 	int count = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2044,7 +2043,7 @@ pmap_is_modified(vm_page_t m)
 	pv_entry_t pv;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
 
@@ -2104,7 +2103,7 @@ pmap_is_referenced(vm_page_t m)
 	pv_entry_t pv;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -2133,7 +2132,7 @@ pmap_clear_modify(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2174,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2203,7 +2202,7 @@ pmap_remove_write(vm_page_t m)
 	pv_entry_t pv;
 	vm_prot_t prot;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/mips/mips/pmap.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -1708,7 +1708,7 @@ pmap_remove_all(vm_page_t m)
 	pv_entry_t pv;
 	pt_entry_t *pte, tpte;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 
@@ -1863,8 +1863,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
 	    ("pmap_enter: page %p is not busy", m));
 
 	mpte = NULL;
@@ -1952,7 +1951,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	 * raise IPL while manipulating pv_table since pmap_enter can be
 	 * called at interrupt time.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -2067,7 +2066,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	vm_paddr_t pa;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -2129,7 +2128,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
 		if (mpte != NULL) {
 			pmap_unwire_pte_hold(pmap, va, mpte);
@@ -2464,7 +2463,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -2558,7 +2557,7 @@ pmap_testbit(vm_page_t m, int bit)
 	pt_entry_t *pte;
 	boolean_t rv = FALSE;
 
-	if (m->flags & PG_FICTITIOUS)
+	if (m->oflags & VPO_UNMANAGED)
 		return (rv);
 
 	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
@@ -2585,7 +2584,7 @@ pmap_changebit(vm_page_t m, int bit, boo
 	pv_entry_t pv;
 	pt_entry_t *pte;
 
-	if (m->flags & PG_FICTITIOUS)
+	if (m->oflags & VPO_UNMANAGED)
 		return;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2634,7 +2633,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2659,7 +2658,7 @@ pmap_remove_write(vm_page_t m)
 	vm_offset_t va;
 	pt_entry_t *pte;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -2699,7 +2698,7 @@ int
 pmap_ts_referenced(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	if (m->md.pv_flags & PV_TABLE_REF) {
 		vm_page_lock_queues();
@@ -2721,7 +2720,7 @@ pmap_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
@@ -2775,7 +2774,7 @@ void
 pmap_clear_modify(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2806,7 +2805,7 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
 }
@@ -2820,7 +2819,7 @@ void
 pmap_clear_reference(vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_REF) {
@@ -3168,7 +3167,7 @@ page_is_managed(vm_paddr_t pa)
 		m = PHYS_TO_VM_PAGE(pa);
 		if (m == NULL)
 			return (0);
-		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+		if ((m->oflags & VPO_UNMANAGED) == 0)
 			return (1);
 	}
 	return (0);
@@ -3181,7 +3180,7 @@ init_pte_prot(vm_offset_t va, vm_page_t
 	if (!(prot & VM_PROT_WRITE))
 		rw = PTE_V | PTE_RO | PTE_C_CACHE;
-	else if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	else if ((m->oflags & VPO_UNMANAGED) == 0) {
 		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
 			rw = PTE_V | PTE_D | PTE_C_CACHE;
 		else

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Tue Aug  9 20:55:54 2011	(r224745)
+++ head/sys/powerpc/aim/mmu_oea.c	Tue Aug  9 21:01:36 2011	(r224746)
@@ -1073,12 +1073,12 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	if (pmap_bootstrapped)
 		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+	    VM_OBJECT_LOCKED(m->object),
 	    ("moea_enter_locked: page %p is not busy", m));
 
 	/* XXX change the pvo head for fake pages */
-	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
+	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		pvo_flags &= ~PVO_MANAGED;
 		pvo_head = &moea_pvo_kunmanaged;
 		zone = moea_upvo_zone;
@@ -1088,7 +1088,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	 * If this is a managed page, and it's the first reference to the page,
 	 * clear the execness of the page.  Otherwise fetch the execness.
 	 */
-	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
+	if ((pg != NULL) && ((m->oflags & VPO_UNMANAGED) == 0)) {
 		if (LIST_EMPTY(pvo_head)) {
 			moea_attr_clear(pg, PTE_EXEC);
 		} else {
@@ -1101,7 +1101,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	if (prot & VM_PROT_WRITE) {
 		pte_lo |= PTE_BW;
 		if (pmap_bootstrapped &&
-		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_flag_set(m, PG_WRITEABLE);
 	} else
 		pte_lo |= PTE_BR;
@@ -1112,9 +1112,6 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	if (wired)
 		pvo_flags |= PVO_WIRED;
 
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		pvo_flags |= PVO_FAKE;
-
 	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
 	    pte_lo, pvo_flags);
@@ -1245,7 +1242,7 @@ boolean_t
 moea_is_referenced(mmu_t mmu, vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_is_referenced: page %p is not managed", m));
 	return (moea_query_bit(m, PTE_REF));
 }
@@ -1254,7 +1251,7 @@ boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_is_modified: page %p is not managed", m));
 
 	/*
@@ -1286,7 +1283,7 @@ void
 moea_clear_reference(mmu_t mmu, vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_reference: page %p is not managed", m));
 	moea_clear_bit(m, PTE_REF);
 }
@@ -1295,7 +1292,7 @@ void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -1322,7 +1319,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m
 	pmap_t pmap;
 	u_int lo;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_remove_write: page %p is not managed", m));
 
 	/*
@@ -1379,7 +1376,7 @@ boolean_t
 moea_ts_referenced(mmu_t mmu, vm_page_t m)
 {
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_ts_referenced: page %p is not managed", m));
 	return (moea_clear_bit(m, PTE_REF));
}

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***