Date: Sun, 23 Jun 2013 22:27:57 +0000 (UTC)
From: Attilio Rao <attilio@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r252130 - in user/attilio/vmobj-readlock/sys: amd64/amd64 arm/arm cddl/contrib/opensolaris/uts/common/fs/zfs dev/agp dev/drm2/i915 dev/drm2/ttm dev/md fs/fuse fs/tmpfs i386/i386 i386/xe...
Message-ID: <201306232227.r5NMRvZb007201@svn.freebsd.org>

Author: attilio
Date: Sun Jun 23 22:27:57 2013
New Revision: 252130
URL: http://svnweb.freebsd.org/changeset/base/252130

Log:
  - Introduce the read/write busy concept, switching the KPI to a real
    rwlock-style lock.
  - Add an accessor KPI to cope with this change.
  - The KPI relies on the object lock still being held in write mode in
    all the places that assume the busy lock state won't change.
  - The KPI is not yet inlined because of the assert dependency on
    vm_object operations.  Once it is implemented via atomics, that
    dependency will go away and we will use inline macros.
  - Remove vm_page_wakeup_locked() because, once the atomics approach is
    implemented, the concurrency on the page lock will possibly be
    minimal.

  Sponsored by:	EMC / Isilon storage division

Modified:
  user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
  user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c
  user/attilio/vmobj-readlock/sys/arm/arm/pmap.c
  user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  user/attilio/vmobj-readlock/sys/dev/agp/agp.c
  user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c
  user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c
  user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c
  user/attilio/vmobj-readlock/sys/dev/md/md.c
  user/attilio/vmobj-readlock/sys/fs/fuse/fuse_vnops.c
  user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c
  user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c
  user/attilio/vmobj-readlock/sys/i386/i386/pmap.c
  user/attilio/vmobj-readlock/sys/i386/xen/pmap.c
  user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c
  user/attilio/vmobj-readlock/sys/kern/kern_exec.c
  user/attilio/vmobj-readlock/sys/kern/sys_process.c
  user/attilio/vmobj-readlock/sys/kern/uipc_shm.c
  user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c
  user/attilio/vmobj-readlock/sys/kern/vfs_bio.c
  user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c
  user/attilio/vmobj-readlock/sys/mips/mips/pmap.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c
  user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c
  user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c
  user/attilio/vmobj-readlock/sys/vm/phys_pager.c
  user/attilio/vmobj-readlock/sys/vm/swap_pager.c
  user/attilio/vmobj-readlock/sys/vm/vm_fault.c
  user/attilio/vmobj-readlock/sys/vm/vm_glue.c
  user/attilio/vmobj-readlock/sys/vm/vm_kern.c
  user/attilio/vmobj-readlock/sys/vm/vm_object.c
  user/attilio/vmobj-readlock/sys/vm/vm_page.c
  user/attilio/vmobj-readlock/sys/vm/vm_page.h
  user/attilio/vmobj-readlock/sys/vm/vm_pageout.c
  user/attilio/vmobj-readlock/sys/vm/vm_phys.c

Modified: user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Sun Jun 23 21:59:52 2013	(r252129)
+++ user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Sun Jun 23 22:27:57 2013	(r252130)
@@ -3451,7 +3451,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
-	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
 		VM_OBJECT_ASSERT_WLOCKED(m->object);
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
@@ -4538,13 +4538,12 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * If the page is not
write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = pmap_is_modified_pvh(&m->md) || @@ -4669,13 +4668,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); if ((m->flags & PG_FICTITIOUS) != 0) @@ -4818,13 +4816,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c ============================================================================== --- user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c Sun Jun 23 22:27:57 2013 (r252130) @@ -2672,8 +2672,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset pa = systempage.pv_pa; m = NULL; } else { - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || - (flags & M_NOWAIT) != 0, + KASSERT((m->oflags & VPO_UNMANAGED) != 0 || + vm_page_busy_wlocked(m) || (flags & M_NOWAIT) != 0, ("pmap_enter_locked: page %p is not busy", m)); pa = VM_PAGE_TO_PHYS(m); } @@ -3931,13 +3931,12 @@ pmap_is_modified(vm_page_t m) ("pmap_is_modified: page %p is not managed", m)); rv = FALSE; /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (rv); rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { @@ -3965,13 +3964,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no mappings can be modified. 
* If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; @@ -4006,13 +4005,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) != 0 || - (m->aflags & PGA_WRITEABLE) != 0) + if (vm_page_busy_wlocked(m) || (m->aflags & PGA_WRITEABLE) != 0) pmap_clearbit(m, PVF_WRITE); } Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap.c ============================================================================== --- user/attilio/vmobj-readlock/sys/arm/arm/pmap.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/arm/arm/pmap.c Sun Jun 23 22:27:57 2013 (r252130) @@ -3319,8 +3319,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset pa = systempage.pv_pa; m = NULL; } else { - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || - (flags & M_NOWAIT) != 0, + KASSERT((m->oflags & VPO_UNMANAGED) != 0 || + vm_page_busy_wlocked(m) || (flags & M_NOWAIT) != 0, ("pmap_enter_locked: page %p is not busy", m)); pa = VM_PAGE_TO_PHYS(m); } @@ -4555,13 +4555,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no mappings can be modified. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; @@ -4612,13 +4612,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) != 0 || - (m->aflags & PGA_WRITEABLE) != 0) + if (vm_page_busy_wlocked(m) || (m->aflags & PGA_WRITEABLE) != 0) pmap_clearbit(m, PVF_WRITE); } Modified: user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c ============================================================================== --- user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Sun Jun 23 22:27:57 2013 (r252130) @@ -336,7 +336,7 @@ page_busy(vnode_t *vp, int64_t start, in for (;;) { if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL && pp->valid) { - if ((pp->oflags & VPO_BUSY) != 0) { + if (vm_page_busy_wlocked(pp)) { /* * Reference the page before unlocking and * sleeping so that the page daemon is less @@ -349,7 +349,7 @@ page_busy(vnode_t *vp, int64_t start, in zfs_vmobject_wlock(obj); continue; } - vm_page_io_start(pp); + vm_page_busy_rlock(pp); } else pp = NULL; @@ -375,7 +375,7 @@ static void page_unbusy(vm_page_t pp, boolean_t unalloc) { - vm_page_io_finish(pp); + vm_page_busy_runlock(pp); if (unalloc) vm_object_pip_subtract(pp->object, 1); } @@ -466,7 +466,7 @@ update_pages(vnode_t *vp, int64_t start, * ZFS to populate a range of page cache pages with data. * * NOTE: this function could be optimized to pre-allocate - * all pages in advance, drain VPO_BUSY on all of them, + * all pages in advance, drain write busy on all of them, * map them into contiguous KVA region and populate them * in one single dmu_read() call. */ @@ -505,7 +505,7 @@ mappedread_sf(vnode_t *vp, int nbytes, u bzero(va + bytes, PAGESIZE - bytes); zfs_unmap_page(sf); zfs_vmobject_wlock(obj); - vm_page_io_finish(pp); + vm_page_busy_runlock(pp); vm_page_lock(pp); if (error) { vm_page_free(pp); @@ -515,7 +515,7 @@ mappedread_sf(vnode_t *vp, int nbytes, u } vm_page_unlock(pp); } else - vm_page_io_finish(pp); + vm_page_busy_runlock(pp); if (error) break; uio->uio_resid -= bytes; Modified: user/attilio/vmobj-readlock/sys/dev/agp/agp.c ============================================================================== --- user/attilio/vmobj-readlock/sys/dev/agp/agp.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/dev/agp/agp.c Sun Jun 23 22:27:57 2013 (r252130) @@ -600,7 +600,7 @@ agp_generic_bind_memory(device_t dev, st goto bad; } } - vm_page_wakeup(m); + vm_page_busy_wunlock(m); } VM_OBJECT_WUNLOCK(mem->am_obj); @@ -626,9 +626,9 @@ bad: VM_OBJECT_ASSERT_WLOCKED(mem->am_obj); for (k = 0; k < mem->am_size; k += PAGE_SIZE) { m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k)); - vm_page_lock(m); if (k >= i) - vm_page_wakeup_locked(m); + vm_page_busy_wunlock(m); + vm_page_lock(m); vm_page_unwire(m, 0); vm_page_unlock(m); } Modified: user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c ============================================================================== --- user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1378,7 +1378,7 @@ retry: VM_OBJECT_WLOCK(vm_obj); m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); if (m != NULL) { - if ((m->flags & VPO_BUSY) != 0) { + if (vm_page_busy_locked(m)) { DRM_UNLOCK(dev); vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); @@ -1436,7 +1436,7 @@ retry: ("not fictitious %p", m)); KASSERT(m->wire_count == 1, 
("wire_count not 1 %p", m)); - if ((m->flags & VPO_BUSY) != 0) { + if (vm_page_busy_locked(m)) { DRM_UNLOCK(dev); vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); @@ -1447,7 +1447,7 @@ retry: vm_page_insert(m, vm_obj, OFF_TO_IDX(offset)); have_page: *mres = m; - vm_page_busy(m); + vm_page_busy_wlock(m); CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot, m->phys_addr); @@ -2528,8 +2528,8 @@ i915_gem_wire_page(vm_object_t object, v } vm_page_lock(m); vm_page_wire(m); - vm_page_wakeup_locked(m); vm_page_unlock(m); + vm_page_busy_wunlock(m); atomic_add_long(&i915_gem_wired_pages_cnt, 1); return (m); } Modified: user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c ============================================================================== --- user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c Sun Jun 23 22:27:57 2013 (r252130) @@ -212,7 +212,7 @@ reserve: } VM_OBJECT_WLOCK(vm_obj); - if ((m->flags & VPO_BUSY) != 0) { + if (vm_page_busy_locked(m)) { vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); vm_page_sleep(m, "ttmpbs"); @@ -226,7 +226,7 @@ reserve: vm_page_lock(m); vm_page_insert(m, vm_obj, OFF_TO_IDX(offset)); vm_page_unlock(m); - vm_page_busy(m); + vm_page_busy_wlock(m); if (oldm != NULL) { vm_page_lock(oldm); Modified: user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c ============================================================================== --- user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c Sun Jun 23 22:27:57 2013 (r252130) @@ -302,7 +302,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) } else vm_page_zero_invalid(from_page, TRUE); } - vm_page_wakeup(from_page); + vm_page_busy_wunlock(from_page); to_page = ttm->pages[i]; if (unlikely(to_page == NULL)) { ret = -ENOMEM; @@ -355,7 +355,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, v pmap_copy_page(from_page, to_page); vm_page_dirty(to_page); to_page->valid = VM_PAGE_BITS_ALL; - vm_page_wakeup(to_page); + vm_page_busy_wunlock(to_page); } vm_object_pip_wakeup(obj); VM_OBJECT_WUNLOCK(obj); Modified: user/attilio/vmobj-readlock/sys/dev/md/md.c ============================================================================== --- user/attilio/vmobj-readlock/sys/dev/md/md.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/dev/md/md.c Sun Jun 23 22:27:57 2013 (r252130) @@ -834,7 +834,7 @@ mdstart_swap(struct md_s *sc, struct bio else rv = vm_pager_get_pages(sc->object, &m, 1, 0); if (rv == VM_PAGER_ERROR) { - vm_page_wakeup(m); + vm_page_busy_wunlock(m); break; } else if (rv == VM_PAGER_FAIL) { /* @@ -859,7 +859,7 @@ mdstart_swap(struct md_s *sc, struct bio else rv = VM_PAGER_OK; if (rv == VM_PAGER_ERROR) { - vm_page_wakeup(m); + vm_page_busy_wunlock(m); break; } if ((bp->bio_flags & BIO_UNMAPPED) != 0) { @@ -875,7 +875,7 @@ mdstart_swap(struct md_s *sc, struct bio else rv = VM_PAGER_OK; if (rv == VM_PAGER_ERROR) { - vm_page_wakeup(m); + vm_page_busy_wunlock(m); break; } if (len != PAGE_SIZE) { @@ -885,8 +885,8 @@ mdstart_swap(struct md_s *sc, struct bio } else vm_pager_page_unswapped(m); } + vm_page_busy_wunlock(m); vm_page_lock(m); - vm_page_wakeup_locked(m); if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE) vm_page_free(m); else Modified: user/attilio/vmobj-readlock/sys/fs/fuse/fuse_vnops.c ============================================================================== --- 
user/attilio/vmobj-readlock/sys/fs/fuse/fuse_vnops.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/fs/fuse/fuse_vnops.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1873,8 +1873,8 @@ fuse_vnop_getpages(struct vop_getpages_a vm_page_activate(m); else vm_page_deactivate(m); - vm_page_wakeup_locked(m); fuse_vm_page_unlock(m); + vm_page_busy_wunlock(m); } else { fuse_vm_page_lock(m); vm_page_free(m); Modified: user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c ============================================================================== --- user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1351,8 +1351,8 @@ retry: vm_page_lock(m); if (rv == VM_PAGER_OK) { vm_page_deactivate(m); - vm_page_wakeup_locked(m); vm_page_unlock(m); + vm_page_busy_wunlock(m); } else { vm_page_free(m); vm_page_unlock(m); Modified: user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c ============================================================================== --- user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c Sun Jun 23 22:27:57 2013 (r252130) @@ -479,12 +479,12 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p } else vm_page_zero_invalid(m, TRUE); } - vm_page_wakeup(m); - vm_page_io_start(m); + vm_page_busy_wunlock(m); + vm_page_busy_rlock(m); VM_OBJECT_WUNLOCK(tobj); error = uiomove_fromphys(&m, offset, tlen, uio); VM_OBJECT_WLOCK(tobj); - vm_page_io_finish(m); + vm_page_busy_runlock(m); VM_OBJECT_WUNLOCK(tobj); vm_page_lock(m); if (m->queue == PQ_NONE) { @@ -594,12 +594,12 @@ tmpfs_mappedwrite(vm_object_t tobj, size } else vm_page_zero_invalid(tpg, TRUE); } - vm_page_wakeup(tpg); - vm_page_io_start(tpg); + vm_page_busy_wunlock(tpg); + vm_page_busy_rlock(tpg); VM_OBJECT_WUNLOCK(tobj); error = uiomove_fromphys(&tpg, offset, tlen, uio); VM_OBJECT_WLOCK(tobj); - vm_page_io_finish(tpg); + vm_page_busy_runlock(tpg); if (error == 0) vm_page_dirty(tpg); vm_page_lock(tpg); Modified: user/attilio/vmobj-readlock/sys/i386/i386/pmap.c ============================================================================== --- user/attilio/vmobj-readlock/sys/i386/i386/pmap.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/i386/i386/pmap.c Sun Jun 23 22:27:57 2013 (r252130) @@ -3422,7 +3422,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va)); - if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m)) VM_OBJECT_ASSERT_WLOCKED(m->object); mpte = NULL; @@ -4516,13 +4516,12 @@ pmap_is_modified(vm_page_t m) ("pmap_is_modified: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = pmap_is_modified_pvh(&m->md) || @@ -4651,13 +4650,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); sched_pin(); @@ -4808,13 +4806,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; Modified: user/attilio/vmobj-readlock/sys/i386/xen/pmap.c ============================================================================== --- user/attilio/vmobj-readlock/sys/i386/xen/pmap.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/i386/xen/pmap.c Sun Jun 23 22:27:57 2013 (r252130) @@ -2667,7 +2667,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va)); - if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m)) VM_OBJECT_ASSERT_WLOCKED(m->object); mpte = NULL; @@ -3696,13 +3696,12 @@ pmap_is_modified(vm_page_t m) rv = FALSE; /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (rv); rw_wlock(&pvh_global_lock); sched_pin(); @@ -3827,13 +3826,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); sched_pin(); @@ -3933,13 +3931,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; Modified: user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c ============================================================================== --- user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1677,7 +1677,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, va &= ~PAGE_MASK; KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0, + KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_busy_wlocked(m), ("pmap_enter: page %p is not busy", m)); /* @@ -2234,13 +2234,12 @@ pmap_is_modified(vm_page_t m) rv = FALSE; /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can be dirty. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (rv); rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { @@ -2323,13 +2322,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can be modified. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; @@ -2396,13 +2395,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { Modified: user/attilio/vmobj-readlock/sys/kern/kern_exec.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/kern_exec.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/kern_exec.c Sun Jun 23 22:27:57 2013 (r252130) @@ -946,9 +946,8 @@ exec_map_first_page(imgp) if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) { if (ma[i]->valid) break; - if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy) + if (vm_page_busy_trywlock(ma[i])) break; - vm_page_busy(ma[i]); } else { ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); @@ -969,8 +968,8 @@ exec_map_first_page(imgp) return (EIO); } } + vm_page_busy_wunlock(ma[0]); vm_page_lock(ma[0]); - vm_page_wakeup_locked(ma[0]); vm_page_hold(ma[0]); vm_page_unlock(ma[0]); VM_OBJECT_WUNLOCK(object); Modified: user/attilio/vmobj-readlock/sys/kern/sys_process.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/sys_process.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/sys_process.c Sun Jun 23 22:27:57 2013 (r252130) @@ -317,7 +317,7 @@ proc_rwmem(struct proc *p, struct uio *u * Release the page. */ VM_OBJECT_WLOCK(m->object); - vm_page_io_finish(m); + vm_page_busy_runlock(m); VM_OBJECT_WUNLOCK(m->object); } while (error == 0 && uio->uio_resid > 0); Modified: user/attilio/vmobj-readlock/sys/kern/uipc_shm.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/uipc_shm.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/uipc_shm.c Sun Jun 23 22:27:57 2013 (r252130) @@ -301,8 +301,8 @@ retry: vm_page_lock(m); if (rv == VM_PAGER_OK) { vm_page_deactivate(m); - vm_page_wakeup_locked(m); vm_page_unlock(m); + vm_page_busy_wunlock(m); } else { vm_page_free(m); vm_page_unlock(m); Modified: user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c Sun Jun 23 22:27:57 2013 (r252130) @@ -2245,7 +2245,7 @@ retry_space: * then free it. */ if (pg->wire_count == 0 && pg->valid == 0 && - pg->busy == 0 && !(pg->oflags & VPO_BUSY)) + !vm_page_busy_locked(pg)) vm_page_free(pg); vm_page_unlock(pg); VM_OBJECT_WUNLOCK(obj); Modified: user/attilio/vmobj-readlock/sys/kern/vfs_bio.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/vfs_bio.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/vfs_bio.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1855,8 +1855,7 @@ vfs_vmio_release(struct buf *bp) * buffer was used for direct I/O */ if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) { - if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 && - m->wire_count == 0) + if (m->wire_count == 0 && !vm_page_busy_locked(m)) vm_page_free(m); } else if (bp->b_flags & B_DIRECT) vm_page_try_to_free(m); @@ -3487,10 +3486,10 @@ allocbuf(struct buf *bp, int size) * here could interfere with paging I/O, no * matter which process we are. 
* - * We can only test VPO_BUSY here. Blocking on - * m->busy might lead to deadlocks once - * allocbuf() is called after pages are - * vfs_busy_pages(). + * We can only test write busy here. + * Blocking on read busy might lead to + * deadlocks once allocbuf() is called after + * pages are vfs_busy_pages(). */ m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages, VM_ALLOC_NOBUSY | @@ -3850,7 +3849,7 @@ bufdone_finish(struct buf *bp) vfs_page_set_valid(bp, foff, m); } - vm_page_io_finish(m); + vm_page_busy_runlock(m); vm_object_pip_subtract(obj, 1); foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; iosize -= resid; @@ -3912,7 +3911,7 @@ vfs_unbusy_pages(struct buf *bp) BUF_CHECK_UNMAPPED(bp); } vm_object_pip_subtract(obj, 1); - vm_page_io_finish(m); + vm_page_busy_runlock(m); } vm_object_pip_wakeupn(obj, 0); VM_OBJECT_WUNLOCK(obj); @@ -3985,8 +3984,8 @@ vfs_page_set_validclean(struct buf *bp, } /* - * Ensure that all buffer pages are not busied by VPO_BUSY flag. If - * any page is busy, drain the flag. + * Ensure that all buffer pages are not write busied. If any page is write + * busy, drain it. */ static void vfs_drain_busy_pages(struct buf *bp) @@ -3998,10 +3997,10 @@ vfs_drain_busy_pages(struct buf *bp) last_busied = 0; for (i = 0; i < bp->b_npages; i++) { m = bp->b_pages[i]; - if ((m->oflags & VPO_BUSY) != 0) { + if (vm_page_busy_wlocked(m)) { for (; last_busied < i; last_busied++) - vm_page_busy(bp->b_pages[last_busied]); - while ((m->oflags & VPO_BUSY) != 0) { + vm_page_busy_wlock(bp->b_pages[last_busied]); + while (vm_page_busy_wlocked(m)) { vm_page_lock(m); VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); vm_page_sleep(m, "vbpage"); @@ -4010,14 +4009,14 @@ vfs_drain_busy_pages(struct buf *bp) } } for (i = 0; i < last_busied; i++) - vm_page_wakeup(bp->b_pages[i]); + vm_page_busy_wunlock(bp->b_pages[i]); } /* * This routine is called before a device strategy routine. * It is used to tell the VM system that paging I/O is in * progress, and treat the pages associated with the buffer - * almost as being VPO_BUSY. Also the object paging_in_progress + * almost as being write busy. Also the object paging_in_progress * flag is handled to make sure that the object doesn't become * inconsistant. 
* @@ -4050,7 +4049,7 @@ vfs_busy_pages(struct buf *bp, int clear if ((bp->b_flags & B_CLUSTER) == 0) { vm_object_pip_add(obj, 1); - vm_page_io_start(m); + vm_page_busy_rlock(m); } /* * When readying a buffer for a read ( i.e Modified: user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c Sun Jun 23 22:27:57 2013 (r252130) @@ -466,7 +466,7 @@ cluster_rbuild(struct vnode *vp, u_quad_ for (j = 0; j < tbp->b_npages; j += 1) { vm_page_t m; m = tbp->b_pages[j]; - vm_page_io_start(m); + vm_page_busy_rlock(m); vm_object_pip_add(m->object, 1); if ((bp->b_npages == 0) || (bp->b_pages[bp->b_npages-1] != m)) { @@ -947,7 +947,7 @@ cluster_wbuild(struct vnode *vp, long si if (i != 0) { /* if not first buffer */ for (j = 0; j < tbp->b_npages; j += 1) { m = tbp->b_pages[j]; - if (m->oflags & VPO_BUSY) { + if (vm_page_busy_wlocked(m)) { VM_OBJECT_WUNLOCK( tbp->b_object); bqrelse(tbp); @@ -957,7 +957,7 @@ cluster_wbuild(struct vnode *vp, long si } for (j = 0; j < tbp->b_npages; j += 1) { m = tbp->b_pages[j]; - vm_page_io_start(m); + vm_page_busy_rlock(m); vm_object_pip_add(m->object, 1); if ((bp->b_npages == 0) || (bp->b_pages[bp->b_npages - 1] != m)) { Modified: user/attilio/vmobj-readlock/sys/mips/mips/pmap.c ============================================================================== --- user/attilio/vmobj-readlock/sys/mips/mips/pmap.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/mips/mips/pmap.c Sun Jun 23 22:27:57 2013 (r252130) @@ -2014,7 +2014,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0, + KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_busy_wlocked(m), ("pmap_enter: page %p is not busy", m)); pa = VM_PAGE_TO_PHYS(m); newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot); @@ -2812,13 +2812,12 @@ pmap_remove_write(vm_page_t m) ("pmap_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { @@ -2878,13 +2877,12 @@ pmap_is_modified(vm_page_t m) ("pmap_is_modified: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PTE_D set. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = pmap_testbit(m, PTE_D); @@ -2931,13 +2929,13 @@ pmap_clear_modify(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("pmap_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("pmap_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set. * If the object containing the page is locked and the page is not - * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c ============================================================================== --- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1158,7 +1158,7 @@ moea_enter_locked(pmap_t pmap, vm_offset if (pmap_bootstrapped) rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); - if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m)) VM_OBJECT_ASSERT_LOCKED(m->object); /* XXX change the pvo head for fake pages */ @@ -1326,13 +1326,12 @@ moea_is_modified(mmu_t mmu, vm_page_t m) ("moea_is_modified: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PTE_CHG set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = moea_query_bit(m, PTE_CHG); @@ -1371,13 +1370,13 @@ moea_clear_modify(mmu_t mmu, vm_page_t m KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("moea_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("moea_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG * set. If the object containing the page is locked and the page is - * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * not write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; @@ -1401,13 +1400,12 @@ moea_remove_write(mmu_t mmu, vm_page_t m ("moea_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); lo = moea_attr_fetch(m); Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c ============================================================================== --- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c Sun Jun 23 21:59:52 2013 (r252129) +++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c Sun Jun 23 22:27:57 2013 (r252130) @@ -1260,7 +1260,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_ pvo_flags = PVO_MANAGED; } - if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m)) VM_OBJECT_ASSERT_LOCKED(m->object); /* XXX change the pvo head for fake pages */ @@ -1522,13 +1522,12 @@ moea64_is_modified(mmu_t mmu, vm_page_t ("moea64_is_modified: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be + * If the page is not write busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have LPTE_CHG set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); - if ((m->oflags & VPO_BUSY) == 0 && - (m->aflags & PGA_WRITEABLE) == 0) + if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); return (moea64_query_bit(mmu, m, LPTE_CHG)); } @@ -1562,13 +1561,13 @@ moea64_clear_modify(mmu_t mmu, vm_page_t KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_BUSY) == 0, - ("moea64_clear_modify: page %p is busy", m)); + KASSERT(!vm_page_busy_wlocked(m), + ("moea64_clear_modify: page %p is write locked", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG * set. If the object containing the page is locked and the page is - * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. + * not write busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; @@ -1590,13 +1589,12 @@ moea64_remove_write(mmu_t mmu, vm_page_t ("moea64_remove_write: page %p is not managed", m)); /* - * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by + * If the page is not write busied, then PGA_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	powerpc_sync();
 	LOCK_TABLE_RD();

Modified: user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c	Sun Jun 23 21:59:52 2013	(r252129)
+++ user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c	Sun Jun 23 22:27:57 2013	(r252130)
@@ -1563,7 +1563,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_enter_locked: user pmap, non user va"));
 	}
-	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);

 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1959,13 +1959,12 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
 	    ("mmu_booke_remove_write: page %p is not managed", m));

 	/*
-	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
 	 * another thread while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -2204,13 +2203,12 @@ mmu_booke_is_modified(mmu_t mmu, vm_page
 	rv = FALSE;

 	/*
-	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * If the page is not write busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can be modified.

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
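As a quick orientation to the KPI change above, the sketch below shows the conversion
pattern the hunks apply, using only accessor names that appear in the diff:
vm_page_busy_rlock()/vm_page_busy_runlock() replace the vm_page_io_start()/
vm_page_io_finish() pair, vm_page_busy_wlock()/vm_page_busy_wunlock() replace
vm_page_busy()/vm_page_wakeup(), and vm_page_busy_wlocked()/vm_page_busy_locked()
replace tests of VPO_BUSY and m->busy.  The helper function itself and its exact
locking order are illustrative assumptions, not code from the branch.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Illustrative only: a minimal sketch of the old-to-new busy KPI mapping.
 * As the log message requires, the object write lock is held wherever the
 * busy state is assumed to be stable.
 */
static void
example_busy_usage(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, pindex);

	/* Formerly: (m->oflags & VPO_BUSY) != 0 || m->busy != 0. */
	if (m == NULL || vm_page_busy_locked(m)) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	/* Formerly: vm_page_io_start(m) ... vm_page_io_finish(m). */
	vm_page_busy_rlock(m);		/* shared ("read") busy around paging I/O */
	VM_OBJECT_WUNLOCK(object);
	/* ... perform the I/O on m without the object lock ... */
	VM_OBJECT_WLOCK(object);
	vm_page_busy_runlock(m);

	/* Formerly: vm_page_busy(m) ... vm_page_wakeup(m). */
	if (!vm_page_busy_locked(m)) {
		vm_page_busy_wlock(m);	/* exclusive ("write") busy */
		/* ... exclusive manipulation of the page ... */
		vm_page_busy_wunlock(m);
	}

	VM_OBJECT_WUNLOCK(object);
}

Per the log, the accessors are expected to become inline atomics later; only the
names and pairings shown above are taken from the hunks in this commit.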
