From owner-svn-src-head@FreeBSD.ORG Tue Sep  6 10:30:11 2011
Message-Id: <201109061030.p86AUBi8079026@svn.freebsd.org>
From: Konstantin Belousov <kib@FreeBSD.org>
Date: Tue, 6 Sep 2011 10:30:11 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-head@freebsd.org
Subject: svn commit: r225418 - in head/sys: amd64/amd64 arm/arm
	cddl/contrib/opensolaris/uts/common/fs/zfs fs/tmpfs i386/i386 i386/xen
	ia64/ia64 mips/mips powerpc/aim powerpc/booke powerpc/powerpc sparc64/s...
List-Id: SVN commit messages for the src tree for head/-current

Author: kib
Date: Tue Sep  6 10:30:11 2011
New Revision: 225418
URL: http://svn.freebsd.org/changeset/base/225418

Log:
  Split the vm_page flags PG_WRITEABLE and PG_REFERENCED into an atomic
  flags field.  Updates to the atomic flags are performed using atomic
  ops on the containing word, do not require any vm lock to be held, and
  are non-blocking.  The vm_page_aflag_set(9) and vm_page_aflag_clear(9)
  functions are provided to modify aflags.

  Document that changes to the flags field now only require the page lock.

  Introduce the vm_page_reference(9) function to provide a stable KPI and
  KBI for filesystems like tmpfs and zfs which need to mark a page as
  referenced.
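  For illustration only, a minimal sketch of how accessors with these
  properties can be built on the atomic(9) primitives.  The word-access
  trick and the vm_page_reference() wrapper follow the description above,
  but the field layout, the endianness handling, and the alignment
  assertion here are assumptions, not necessarily the committed code:

	/*
	 * Sketch: update the byte-sized m->aflags field with a 32-bit
	 * atomic op on the word containing it, so no lock is needed and
	 * the update never blocks.  Assumes the aflags byte sits at a
	 * word-aligned offset inside struct vm_page.
	 */
	static inline void
	vm_page_aflag_set(vm_page_t m, uint8_t bits)
	{
		uint32_t *addr, val;

		addr = (void *)&m->aflags;
		KASSERT(((uintptr_t)addr & 3) == 0,
		    ("vm_page_aflag_set: aflags is misaligned"));
		val = bits;
	#if BYTE_ORDER == BIG_ENDIAN
		val <<= 24;	/* aflags assumed in the high byte */
	#endif
		atomic_set_32(addr, val);
	}

	/*
	 * Stable KPI/KBI wrapper for filesystems such as tmpfs and zfs:
	 * mark a page referenced without taking the page queues lock.
	 */
	void
	vm_page_reference(vm_page_t m)
	{

		vm_page_aflag_set(m, PGA_REFERENCED);
	}

  With such a wrapper, the old caller pattern "vm_page_lock_queues();
  vm_page_flag_set(m, PG_REFERENCED);" collapses to a single lock-free
  vm_page_reference(m) call, which is exactly the substitution the tmpfs
  and zfs hunks below make.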
  Reviewed by:	alc, attilio
  Tested by:	marius, flo (sparc64); andreast (powerpc, powerpc64)
  Approved by:	re (bz)

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  head/sys/fs/tmpfs/tmpfs_vnops.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/swap_pager.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/amd64/amd64/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -2123,7 +2123,7 @@ pmap_collect(pmap_t locked_pmap, struct
 		KASSERT((tpte & PG_W) == 0,
 		    ("pmap_collect: wired pte %#lx", tpte));
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		free = NULL;
@@ -2137,7 +2137,7 @@
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 }
@@ -2391,7 +2391,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 }
@@ -2615,10 +2615,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
 			if (oldpde & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if (TAILQ_EMPTY(&m->md.pv_list) &&
 			    TAILQ_EMPTY(&pvh->pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+				vm_page_aflag_clear(m, PGA_WRITEABLE);
 		}
 	}
 	if (pmap == kernel_pmap) {
@@ -2659,7 +2659,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		if (oldpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
 	return (pmap_unuse_pt(pmap, va, ptepde, free));
@@ -2872,7 +2872,7 @@ pmap_remove_all(vm_page_t m)
 		if (tpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);

 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2885,7 +2885,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 	pmap_free_zero_pages(free);
 }
@@ -3301,7 +3301,7 @@ validate:
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpte |= pg_nx;
@@ -3325,7 +3325,7 @@ validate:
 		origpte = pte_load_store(pte, newpte);
 		if (origpte & PG_A) {
 			if (origpte & PG_MANAGED)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			if (opa != VM_PAGE_TO_PHYS(m) ||
 			    ((origpte & PG_NX) == 0 && (newpte & PG_NX)))
 				invlva = TRUE;
@@ -3339,7 +3339,7 @@ validate:
 		if ((origpte & PG_MANAGED) != 0 &&
 		    TAILQ_EMPTY(&om->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
-			vm_page_flag_clear(om, PG_WRITEABLE);
+			vm_page_aflag_clear(om, PGA_WRITEABLE);
 		if (invlva)
 			pmap_invalidate_page(pmap, va);
 	} else
@@ -4147,7 +4147,7 @@ pmap_remove_pages(pmap_t pmap)
 				if (TAILQ_EMPTY(&pvh->pv_list)) {
 					for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 						if (TAILQ_EMPTY(&mt->md.pv_list))
-							vm_page_flag_clear(mt, PG_WRITEABLE);
+							vm_page_aflag_clear(mt, PGA_WRITEABLE);
 				}
 				mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 				if (mpte != NULL) {
@@ -4165,7 +4165,7 @@ pmap_remove_pages(pmap_t pmap)
 				if (TAILQ_EMPTY(&m->md.pv_list)) {
 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 					if (TAILQ_EMPTY(&pvh->pv_list))
-						vm_page_flag_clear(m, PG_WRITEABLE);
+						vm_page_aflag_clear(m, PGA_WRITEABLE);
 				}
 			}
 			pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
@@ -4203,13 +4203,13 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4332,13 +4332,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4370,7 +4370,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 }
@@ -4478,11 +4478,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/arm/arm/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -1402,7 +1402,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_
 		if ((kwritable == 0) && (writable == 0)) {
 			pg->md.pvh_attrs &= ~PVF_MOD;
-			vm_page_flag_clear(pg, PG_WRITEABLE);
+			vm_page_aflag_clear(pg, PGA_WRITEABLE);
 			return;
 		}
 	}
@@ -1568,7 +1568,7 @@ pmap_clearbit(struct vm_page *pg, u_int
 	}

 	if (maskbits & PVF_WRITE)
-		vm_page_flag_clear(pg, PG_WRITEABLE);
+		vm_page_aflag_clear(pg, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 	return (count);
 }
@@ -1630,7 +1630,7 @@ pmap_enter_pv(struct vm_page *pg, struct
 	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 	if (pve->pv_flags & PVF_WIRED)
 		++pm->pm_stats.wired_count;
-	vm_page_flag_set(pg, PG_REFERENCED);
+	vm_page_aflag_set(pg, PGA_REFERENCED);
 }

 /*
@@ -1699,7 +1699,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t
 	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
 		pg->md.pvh_attrs &= ~PVF_REF;
 	else
-		vm_page_flag_set(pg, PG_REFERENCED);
+		vm_page_aflag_set(pg, PGA_REFERENCED);
 	if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
 	    (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
 		pmap_fix_cache(pg, pm, 0);
@@ -1709,7 +1709,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t
 				break;
 		if (!pve) {
 			pg->md.pvh_attrs &= ~PVF_MOD;
-			vm_page_flag_clear(pg, PG_WRITEABLE);
+			vm_page_aflag_clear(pg, PGA_WRITEABLE);
 		}
 	}
 	pv = TAILQ_FIRST(&pg->md.pv_list);
@@ -1724,7 +1724,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t
 			--pm->pm_stats.wired_count;
 		pg->md.pvh_attrs &= ~PVF_REF;
 		pg->md.pvh_attrs &= ~PVF_MOD;
-		vm_page_flag_clear(pg, PG_WRITEABLE);
+		vm_page_aflag_clear(pg, PGA_WRITEABLE);
 		pmap_free_pv_entry(pv);
 	}
 }
@@ -2695,7 +2695,7 @@ pmap_remove_pages(pmap_t pmap)
 		npv = TAILQ_NEXT(pv, pv_plist);
 		pmap_nuke_pv(m, pmap, pv);
 		if (TAILQ_EMPTY(&m->md.pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 		pmap_free_pv_entry(pv);
 		pmap_free_l2_bucket(pmap, l2b, 1);
 	}
@@ -3172,7 +3172,7 @@ pmap_remove_all(vm_page_t m)
 		else
 			pmap_tlb_flushD(curpm);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 }
@@ -3406,7 +3406,7 @@ do_l2b_alloc:
 				vm_page_dirty(m);
 		}
 		if (m && opte)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
 		 * Need to do page referenced emulation.
@@ -3418,7 +3418,7 @@ do_l2b_alloc:
 			npte |= L2_S_PROT_W;
 			if (m != NULL &&
 			    (m->oflags & VPO_UNMANAGED) == 0)
-				vm_page_flag_set(m, PG_WRITEABLE);
+				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
 		npte |= pte_l2_s_cache_mode;
 		if (m && m == opg) {
@@ -4505,11 +4505,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no mappings can be modified.
+	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
@@ -4558,13 +4558,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) != 0 ||
-	    (m->flags & PG_WRITEABLE) != 0)
+	    (m->aflags & PGA_WRITEABLE) != 0)
 		pmap_clearbit(m, PVF_WRITE);
 }

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -331,8 +331,7 @@ page_lookup(vnode_t *vp, int64_t start,
 			 * sleeping so that the page daemon is less
 			 * likely to reclaim it.
 			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(pp, PG_REFERENCED);
+			vm_page_reference(pp);
 			vm_page_sleep(pp, "zfsmwb");
 			continue;
 		}

Modified: head/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_vnops.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/fs/tmpfs/tmpfs_vnops.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -518,8 +518,7 @@ lookupvpg:
 			 * Reference the page before unlocking and sleeping so
 			 * that the page daemon is less likely to reclaim it.
 			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_reference(m);
 			vm_page_sleep(m, "tmfsmr");
 			goto lookupvpg;
 		}
@@ -538,8 +537,7 @@ lookupvpg:
 			 * Reference the page before unlocking and sleeping so
 			 * that the page daemon is less likely to reclaim it.
 			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_reference(m);
 			vm_page_sleep(m, "tmfsmr");
 			goto lookupvpg;
 		}
@@ -650,8 +648,7 @@ lookupvpg:
 			 * Reference the page before unlocking and sleeping so
 			 * that the page daemon is less likely to reclaim it.
 			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(vpg, PG_REFERENCED);
+			vm_page_reference(vpg);
 			vm_page_sleep(vpg, "tmfsmw");
 			goto lookupvpg;
 		}

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/i386/i386/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -2207,7 +2207,7 @@ pmap_collect(pmap_t locked_pmap, struct
 		KASSERT((tpte & PG_W) == 0,
 		    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		free = NULL;
@@ -2221,7 +2221,7 @@
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 	sched_unpin();
 }
@@ -2461,7 +2461,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 }
@@ -2714,10 +2714,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
 			if (oldpde & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if (TAILQ_EMPTY(&m->md.pv_list) &&
 			    TAILQ_EMPTY(&pvh->pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+				vm_page_aflag_clear(m, PGA_WRITEABLE);
 		}
 	}
 	if (pmap == kernel_pmap) {
@@ -2763,7 +2763,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		if (oldpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
 	return (pmap_unuse_pt(pmap, va, free));
@@ -2953,7 +2953,7 @@ pmap_remove_all(vm_page_t m)
 		if (tpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);

 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2966,7 +2966,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	vm_page_unlock_queues();
 	pmap_free_zero_pages(free);
@@ -3413,7 +3413,7 @@ validate:
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3439,7 +3439,7 @@ validate:
 		origpte = pte_load_store(pte, newpte);
 		if (origpte & PG_A) {
 			if (origpte & PG_MANAGED)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			if (opa != VM_PAGE_TO_PHYS(m))
 				invlva = TRUE;
 #ifdef PAE
@@ -3457,7 +3457,7 @@ validate:
 		if ((origpte & PG_MANAGED) != 0 &&
 		    TAILQ_EMPTY(&om->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
-			vm_page_flag_clear(om, PG_WRITEABLE);
+			vm_page_aflag_clear(om, PGA_WRITEABLE);
 		if (invlva)
 			pmap_invalidate_page(pmap, va);
 	} else
@@ -4287,7 +4287,7 @@ pmap_remove_pages(pmap_t pmap)
 				if (TAILQ_EMPTY(&pvh->pv_list)) {
 					for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 						if (TAILQ_EMPTY(&mt->md.pv_list))
-							vm_page_flag_clear(mt, PG_WRITEABLE);
+							vm_page_aflag_clear(mt, PGA_WRITEABLE);
 				}
 				mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 				if (mpte != NULL) {
@@ -4305,7 +4305,7 @@ pmap_remove_pages(pmap_t pmap)
 				if (TAILQ_EMPTY(&m->md.pv_list)) {
 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 					if (TAILQ_EMPTY(&pvh->pv_list))
-						vm_page_flag_clear(m, PG_WRITEABLE);
+						vm_page_aflag_clear(m, PGA_WRITEABLE);
 				}
 			pmap_unuse_pt(pmap, pv->pv_va, &free);
 		}
@@ -4345,13 +4345,13 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4478,13 +4478,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
@@ -4522,7 +4522,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	vm_page_unlock_queues();
 }
@@ -4633,11 +4633,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/i386/xen/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -2037,7 +2037,7 @@ pmap_collect(pmap_t locked_pmap, struct
 		KASSERT((tpte & PG_W) == 0,
 		    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		free = NULL;
@@ -2050,7 +2050,7 @@
 		}
 		PMAP_UNLOCK(pmap);
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 	sched_unpin();
 }
@@ -2222,7 +2222,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list))
-		vm_page_flag_clear(m, PG_WRITEABLE);
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
 }

 /*
@@ -2274,7 +2274,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		if (oldpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
 	return (pmap_unuse_pt(pmap, va, free));
@@ -2446,7 +2446,7 @@ pmap_remove_all(vm_page_t m)
 		if (tpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);

 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2459,7 +2459,7 @@
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	PT_UPDATES_FLUSH();
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
@@ -2739,7 +2739,7 @@ validate:
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -2764,7 +2764,7 @@ validate:
 		PT_SET_VA(pte, newpte | PG_A, FALSE);
 		if (origpte & PG_A) {
 			if (origpte & PG_MANAGED)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			if (opa != VM_PAGE_TO_PHYS(m))
 				invlva = TRUE;
 #ifdef PAE
@@ -2781,7 +2781,7 @@
 		}
 		if ((origpte & PG_MANAGED) != 0 &&
 		    TAILQ_EMPTY(&om->md.pv_list))
-			vm_page_flag_clear(om, PG_WRITEABLE);
+			vm_page_aflag_clear(om, PGA_WRITEABLE);
 		if (invlva)
 			pmap_invalidate_page(pmap, va);
 	} else{
@@ -3549,7 +3549,7 @@ pmap_remove_pages(pmap_t pmap)
 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 			if (TAILQ_EMPTY(&m->md.pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+				vm_page_aflag_clear(m, PGA_WRITEABLE);

 			pmap_unuse_pt(pmap, pv->pv_va, &free);
@@ -3604,13 +3604,13 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	vm_page_lock_queues();
 	sched_pin();
@@ -3735,13 +3735,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
@@ -3769,7 +3769,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	PT_UPDATES_FLUSH();
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
@@ -3846,11 +3846,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/ia64/ia64/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -804,7 +804,7 @@ retry:
 			pmap_invalidate_page(va);
 			pmap_switch(oldpmap);
 			if (pmap_accessed(pte))
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if (pmap_dirty(pte))
 				vm_page_dirty(m);
 			pmap_free_pte(pte, va);
@@ -819,7 +819,7 @@ retry:
 			free_pv_entry(pv);
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 	if (allocated_pv == NULL) {
 		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
@@ -972,7 +972,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 		m->md.pv_list_count--;
 		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);

 		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 		free_pv_entry(pv);
@@ -1198,7 +1198,7 @@ pmap_remove_pte(pmap_t pmap, struct ia64
 		if (pmap_dirty(pte))
 			vm_page_dirty(m);
 		if (pmap_accessed(pte))
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);

 		error = pmap_remove_entry(pmap, m, va, pv);
 	}
@@ -1460,7 +1460,7 @@ pmap_remove_all(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 }
@@ -1647,7 +1647,7 @@ validate:
 		ia64_sync_icache(va, PAGE_SIZE);

 	if ((prot & VM_PROT_WRITE) != 0 && managed)
-		vm_page_flag_set(m, PG_WRITEABLE);
+		vm_page_aflag_set(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
@@ -2048,13 +2048,13 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can be dirty.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2139,11 +2139,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2206,13 +2206,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2235,7 +2235,7 @@ pmap_remove_write(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 }

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/mips/mips/pmap.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -1432,7 +1432,7 @@ retry:
 			KASSERT(!pte_test(&oldpte, PTE_W),
 			    ("wired pte for unwired page"));
 			if (m->md.pv_flags & PV_TABLE_REF)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if (pte_test(&oldpte, PTE_D))
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, va);
@@ -1448,7 +1448,7 @@ retry:
 			free_pv_entry(pv);
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list)) {
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 			m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
 		}
 	}
@@ -1527,7 +1527,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list))
-		vm_page_flag_clear(m, PG_WRITEABLE);
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
 }

 /*
@@ -1589,7 +1589,7 @@ pmap_remove_pte(struct pmap *pmap, pt_en
 			vm_page_dirty(m);
 		}
 		if (m->md.pv_flags & PV_TABLE_REF)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);

 		pmap_remove_entry(pmap, m, va);
@@ -1713,7 +1713,7 @@ pmap_remove_all(vm_page_t m)
 	vm_page_lock_queues();

 	if (m->md.pv_flags & PV_TABLE_REF)
-		vm_page_flag_set(m, PG_REFERENCED);
+		vm_page_aflag_set(m, PGA_REFERENCED);

 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -1757,7 +1757,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pv);
 	}

-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
 	vm_page_unlock_queues();
 }
@@ -2004,7 +2004,7 @@ validate:
 		*pte = newpte;
 		if (page_is_managed(opa) && (opa != pa)) {
 			if (om->md.pv_flags & PV_TABLE_REF)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			om->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
 		}
@@ -2017,7 +2017,7 @@ validate:
 			}
 			if (page_is_managed(opa) &&
 			    TAILQ_EMPTY(&om->md.pv_list))
-				vm_page_flag_clear(om, PG_WRITEABLE);
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		} else {
 			*pte = newpte;
 		}
@@ -2535,7 +2535,7 @@ pmap_remove_pages(pmap_t pmap)
 		m->md.pv_list_count--;
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 		}
 		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
 		free_pv_entry(pv);
@@ -2615,7 +2615,7 @@ pmap_changebit(vm_page_t m, int bit, boo
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	if (!setem && bit == PTE_D)
-		vm_page_flag_clear(m, PG_WRITEABLE);
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
 }

 /*
@@ -2662,13 +2662,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;

 	/*
@@ -2685,7 +2685,7 @@ pmap_remove_write(vm_page_t m)
 		pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
 		    VM_PROT_READ | VM_PROT_EXECUTE);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	vm_page_unlock_queues();
 }
@@ -2724,13 +2724,13 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PTE_D set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_MOD)
@@ -2781,11 +2781,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_D set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	if (m->md.pv_flags & PV_TABLE_MOD) {
@@ -2929,7 +2929,7 @@ retry:
 		 * determine if the address is MINCORE_REFERENCED.
 		 */
 		m = PHYS_TO_VM_PAGE(pa);
-		if ((m->flags & PG_REFERENCED) != 0)
+		if ((m->aflags & PGA_REFERENCED) != 0)
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 	}
 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
@@ -3185,7 +3185,7 @@ init_pte_prot(vm_offset_t va, vm_page_t
 			rw = PTE_V | PTE_D | PTE_C_CACHE;
 		else
 			rw = PTE_V | PTE_C_CACHE;
-		vm_page_flag_set(m, PG_WRITEABLE);
+		vm_page_aflag_set(m, PGA_WRITEABLE);
 	} else
 		/* Needn't emulate a modified bit for unmanaged pages. */
 		rw = PTE_V | PTE_D | PTE_C_CACHE;

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Tue Sep  6 10:21:33 2011	(r225417)
+++ head/sys/powerpc/aim/mmu_oea.c	Tue Sep  6 10:30:11 2011	(r225418)
@@ -1102,7 +1102,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
 			pte_lo |= PTE_BW;
 		if (pmap_bootstrapped &&
 		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	} else
 		pte_lo |= PTE_BR;
@@ -1255,13 +1255,13 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 	    ("moea_is_modified: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PTE_CHG set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (moea_query_bit(m, PTE_CHG));
 }
@@ -1299,11 +1299,11 @@ moea_clear_modify(mmu_t mmu, vm_page_t m
 	    ("moea_clear_modify: page %p is busy", m));

 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
 	 * set.  If the object containing the page is locked and the page is
-	 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	moea_clear_bit(m, PTE_CHG);
 }
@@ -1323,13 +1323,13 @@ moea_remove_write(mmu_t mmu, vm_page_t m
 	    ("moea_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***