Date: Tue, 11 Jun 2013 19:02:11 +0000 (UTC) From: Attilio Rao <attilio@FreeBSD.org> To: src-committers@freebsd.org, svn-src-user@freebsd.org Subject: svn commit: r251628 - in user/attilio/vmobj-readlock/sys: cddl/contrib/opensolaris/uts/common/fs/zfs fs/tmpfs kern vm Message-ID: <201306111902.r5BJ2B1o054355@svn.freebsd.org>
next in thread | raw e-mail | index | archive | help
Author: attilio Date: Tue Jun 11 19:02:10 2013 New Revision: 251628 URL: http://svnweb.freebsd.org/changeset/base/251628 Log: - Fix a mismerge which happened recently. - When a page is only read, rather than indiscriminately using page holding, soft busy it. The page hold mechanism is cheaper than wiring but it is also dangerous in that it mostly breaks LRU. Unless this is absolutely necessary, avoid holding the page. This patch cleans up page busying for all the page copy cases. The only remaining usages of page holding are vm_fault_hold_pages(), vm_fault_hold() (which should be fixed for the proc_rwmem() case too) and finally exec_map_first_page(). Modified: user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c user/attilio/vmobj-readlock/sys/kern/imgact_elf.c user/attilio/vmobj-readlock/sys/vm/vm_extern.h user/attilio/vmobj-readlock/sys/vm/vm_glue.c Modified: user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c ============================================================================== --- user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Tue Jun 11 18:46:46 2013 (r251627) +++ user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Tue Jun 11 19:02:10 2013 (r251628) @@ -324,7 +324,8 @@ zfs_ioctl(vnode_t *vp, u_long com, intpt } static vm_page_t -page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes) +page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes, + boolean_t alloc) { vm_object_t obj; vm_page_t pp; @@ -348,18 +349,22 @@ page_busy(vnode_t *vp, int64_t start, in zfs_vmobject_wlock(obj); continue; } - } else { + } else + pp = NULL; + + if (pp == NULL && alloc) pp = vm_page_alloc(obj, OFF_TO_IDX(start), VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED | VM_ALLOC_NOBUSY); - } if (pp != NULL) { ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL); - vm_object_pip_add(obj, 
1); vm_page_io_start(pp); - pmap_remove_write(pp); - vm_page_clear_dirty(pp, off, nbytes); + if (alloc) { + vm_object_pip_add(obj, 1); + pmap_remove_write(pp); + vm_page_clear_dirty(pp, off, nbytes); + } } break; } @@ -367,58 +372,12 @@ page_busy(vnode_t *vp, int64_t start, in } static void -page_unbusy(vm_page_t pp) +page_unbusy(vm_page_t pp, boolean_t unalloc) { vm_page_io_finish(pp); - vm_object_pip_subtract(pp->object, 1); -} - -static vm_page_t -page_hold(vnode_t *vp, int64_t start) -{ - vm_object_t obj; - vm_page_t pp; - - obj = vp->v_object; - zfs_vmobject_assert_wlocked(obj); - - for (;;) { - if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL && - pp->valid) { - if ((pp->oflags & VPO_BUSY) != 0) { - /* - * Reference the page before unlocking and - * sleeping so that the page daemon is less - * likely to reclaim it. - */ - vm_page_reference(pp); - vm_page_lock(pp); - zfs_vmobject_wunlock(obj); - vm_page_sleep(pp, "zfsmwb"); - zfs_vmobject_wlock(obj); - continue; - } - - ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL); - vm_page_lock(pp); - vm_page_hold(pp); - vm_page_unlock(pp); - - } else - pp = NULL; - break; - } - return (pp); -} - -static void -page_unhold(vm_page_t pp) -{ - - vm_page_lock(pp); - vm_page_unhold(pp); - vm_page_unlock(pp); + if (unalloc) + vm_object_pip_subtract(pp->object, 1); } static caddr_t @@ -482,7 +441,8 @@ update_pages(vnode_t *vp, int64_t start, zfs_vmobject_wlock(obj); vm_page_undirty(pp); - } else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) { + } else if ((pp = page_busy(vp, start, off, nbytes, + TRUE)) != NULL) { zfs_vmobject_wunlock(obj); va = zfs_map_page(pp, &sf); @@ -491,7 +451,7 @@ update_pages(vnode_t *vp, int64_t start, zfs_unmap_page(sf); zfs_vmobject_wlock(obj); - page_unbusy(pp); + page_unbusy(pp, TRUE); } len -= nbytes; off = 0; @@ -599,7 +559,7 @@ mappedread(vnode_t *vp, int nbytes, uio_ vm_page_t pp; uint64_t bytes = MIN(PAGESIZE - off, len); - if (pp = page_hold(vp, start)) { + if (pp = page_busy(vp, 
start, 0, 0, FALSE)) { struct sf_buf *sf; caddr_t va; @@ -608,7 +568,7 @@ mappedread(vnode_t *vp, int nbytes, uio_ error = uiomove(va + off, bytes, UIO_READ, uio); zfs_unmap_page(sf); zfs_vmobject_wlock(obj); - page_unhold(pp); + page_unbusy(pp, FALSE); } else { zfs_vmobject_wunlock(obj); error = dmu_read_uio(os, zp->z_id, uio, bytes); Modified: user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c ============================================================================== --- user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c Tue Jun 11 18:46:46 2013 (r251627) +++ user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c Tue Jun 11 19:02:10 2013 (r251628) @@ -487,13 +487,13 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p vm_page_zero_invalid(m, TRUE); vm_page_wakeup(m); } - vm_page_lock(m); - vm_page_hold(m); - vm_page_unlock(m); + vm_page_io_start(m); VM_OBJECT_WUNLOCK(tobj); error = uiomove_fromphys(&m, offset, tlen, uio); + VM_OBJECT_WLOCK(tobj); + vm_page_io_finish(m); + VM_OBJECT_WUNLOCK(tobj); vm_page_lock(m); - vm_page_unhold(m); if (m->queue == PQ_NONE) { vm_page_deactivate(m); } else { @@ -604,16 +604,14 @@ tmpfs_mappedwrite(vm_object_t tobj, size vm_page_zero_invalid(tpg, TRUE); vm_page_wakeup(tpg); } - vm_page_lock(tpg); - vm_page_hold(tpg); - vm_page_unlock(tpg); + vm_page_io_start(tpg); VM_OBJECT_WUNLOCK(tobj); error = uiomove_fromphys(&tpg, offset, tlen, uio); VM_OBJECT_WLOCK(tobj); + vm_page_io_finish(tpg); if (error == 0) vm_page_dirty(tpg); vm_page_lock(tpg); - vm_page_unhold(tpg); if (tpg->queue == PQ_NONE) { vm_page_deactivate(tpg); } else { Modified: user/attilio/vmobj-readlock/sys/kern/imgact_elf.c ============================================================================== --- user/attilio/vmobj-readlock/sys/kern/imgact_elf.c Tue Jun 11 18:46:46 2013 (r251627) +++ user/attilio/vmobj-readlock/sys/kern/imgact_elf.c Tue Jun 11 19:02:10 2013 (r251628) @@ -378,7 +378,7 @@ __elfN(map_partial)(vm_map_t map, vm_obj off = offset - 
trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); - vm_imgact_unmap_page(sf); + vm_imgact_unmap_page(object, sf); if (error) { return (KERN_FAILURE); } @@ -433,7 +433,7 @@ __elfN(map_insert)(vm_map_t map, vm_obje sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); - vm_imgact_unmap_page(sf); + vm_imgact_unmap_page(object, sf); if (error) { return (KERN_FAILURE); } @@ -553,7 +553,7 @@ __elfN(load_section)(struct image_params trunc_page(offset + filsz); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)map_addr, copy_len); - vm_imgact_unmap_page(sf); + vm_imgact_unmap_page(object, sf); if (error) { return (error); } Modified: user/attilio/vmobj-readlock/sys/vm/vm_extern.h ============================================================================== --- user/attilio/vmobj-readlock/sys/vm/vm_extern.h Tue Jun 11 18:46:46 2013 (r251627) +++ user/attilio/vmobj-readlock/sys/vm/vm_extern.h Tue Jun 11 19:02:10 2013 (r251628) @@ -87,7 +87,7 @@ void vnode_pager_setsize(struct vnode *, int vslock(void *, size_t); void vsunlock(void *, size_t); struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset); -void vm_imgact_unmap_page(struct sf_buf *sf); +void vm_imgact_unmap_page(vm_object_t, struct sf_buf *sf); void vm_thread_dispose(struct thread *td); int vm_thread_new(struct thread *td, int pages); int vm_mlock(struct proc *, struct ucred *, const void *, size_t); Modified: user/attilio/vmobj-readlock/sys/vm/vm_glue.c ============================================================================== --- user/attilio/vmobj-readlock/sys/vm/vm_glue.c Tue Jun 11 18:46:46 2013 (r251627) +++ user/attilio/vmobj-readlock/sys/vm/vm_glue.c Tue Jun 11 19:02:10 2013 (r251628) @@ -233,7 +233,7 @@ vsunlock(void *addr, size_t len) * Return the pinned page if successful; otherwise, return NULL. 
*/ static vm_page_t -vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset) +vm_imgact_page_iostart(vm_object_t object, vm_ooffset_t offset) { vm_page_t m, ma[1]; vm_pindex_t pindex; @@ -259,10 +259,7 @@ vm_imgact_hold_page(vm_object_t object, } vm_page_wakeup(m); } - vm_page_lock(m); - vm_page_hold(m); - vm_page_wakeup_locked(m); - vm_page_unlock(m); + vm_page_io_start(m); out: VM_OBJECT_WUNLOCK(object); return (m); @@ -277,7 +274,7 @@ vm_imgact_map_page(vm_object_t object, v { vm_page_t m; - m = vm_imgact_hold_page(object, offset); + m = vm_imgact_page_iostart(object, offset); if (m == NULL) return (NULL); sched_pin(); @@ -288,16 +285,16 @@ vm_imgact_map_page(vm_object_t object, v * Destroy the given CPU private mapping and unpin the page that it mapped. */ void -vm_imgact_unmap_page(struct sf_buf *sf) +vm_imgact_unmap_page(vm_object_t object, struct sf_buf *sf) { vm_page_t m; m = sf_buf_page(sf); sf_buf_free(sf); sched_unpin(); - vm_page_lock(m); - vm_page_unhold(m); - vm_page_unlock(m); + VM_OBJECT_WLOCK(object); + vm_page_io_finish(m); + VM_OBJECT_WUNLOCK(object); } void
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201306111902.r5BJ2B1o054355>