From: Kip Macy <kmacy@FreeBSD.org>
Date: Fri, 30 Apr 2010 06:43:36 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r207425 - in user/kmacy/head_page_lock_incr/sys: amd64/amd64 dev/agp dev/drm dev/ti fs/tmpfs i386/i386 i386/xen kern net vm
Message-Id: <201004300643.o3U6habn087506@svn.freebsd.org>

Author: kmacy
Date: Fri Apr 30 06:43:35 2010
New Revision: 207425
URL: http://svn.freebsd.org/changeset/base/207425

Log:
  Peel the page queue mutex away from vm_page_{wire, unwire}

Modified:
  user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c
  user/kmacy/head_page_lock_incr/sys/dev/agp/agp.c
  user/kmacy/head_page_lock_incr/sys/dev/agp/agp_i810.c
  user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c
  user/kmacy/head_page_lock_incr/sys/dev/ti/if_ti.c
  user/kmacy/head_page_lock_incr/sys/fs/tmpfs/tmpfs_vnops.c
  user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c
  user/kmacy/head_page_lock_incr/sys/i386/xen/pmap.c
  user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c
  user/kmacy/head_page_lock_incr/sys/kern/uipc_syscalls.c
  user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c
  user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c
  user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c
  user/kmacy/head_page_lock_incr/sys/vm/uma_core.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_glue.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_kern.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_page.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_page.h

Modified: user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -2123,7 +2123,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
     /* entire chunk is free, return it */
     m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
     dump_drop_page(m->phys_addr);
-    vm_page_unwire(m, 0);
+    vm_page_unwire_exclusive(m, 0);
     vm_page_free(m);
 }
 
@@ -4101,7 +4101,7 @@ pmap_remove_pages(pmap_t pmap)
         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
         dump_drop_page(m->phys_addr);
-        vm_page_unwire(m, 0);
+        vm_page_unwire_exclusive(m, 0);
         vm_page_free(m);
     }
 }

Modified: user/kmacy/head_page_lock_incr/sys/dev/agp/agp.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/dev/agp/agp.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/dev/agp/agp.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -623,9 +623,9 @@ bad:
         m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
         if (k >= i)
             vm_page_wakeup(m);
-        vm_page_lock_queues();
+        vm_page_lock(m);
         vm_page_unwire(m, 0);
-        vm_page_unlock_queues();
+        vm_page_unlock(m);
     }
     VM_OBJECT_UNLOCK(mem->am_obj);
@@ -657,9 +657,9 @@ agp_generic_unbind_memory(device_t dev,
     VM_OBJECT_LOCK(mem->am_obj);
     for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
         m = vm_page_lookup(mem->am_obj, atop(i));
-        vm_page_lock_queues();
+        vm_page_lock(m);
         vm_page_unwire(m, 0);
-        vm_page_unlock_queues();
+        vm_page_unlock(m);
     }
     VM_OBJECT_UNLOCK(mem->am_obj);

Modified: user/kmacy/head_page_lock_incr/sys/dev/agp/agp_i810.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/dev/agp/agp_i810.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/dev/agp/agp_i810.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1011,9 +1011,9 @@ agp_i810_free_memory(device_t dev, struc
         VM_OBJECT_LOCK(mem->am_obj);
         m = vm_page_lookup(mem->am_obj, 0);
         VM_OBJECT_UNLOCK(mem->am_obj);
-        vm_page_lock_queues();
+        vm_page_lock(m);
         vm_page_unwire(m, 0);
-        vm_page_unlock_queues();
+        vm_page_unlock(m);
     } else {
         contigfree(sc->argb_cursor, mem->am_size, M_AGP);
         sc->argb_cursor = NULL;

Modified: user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -178,9 +178,9 @@ via_free_sg_info(drm_via_sg_info_t *vsg)
     case dr_via_pages_locked:
         for (i=0; i < vsg->num_pages; ++i) {
             if ( NULL != (page = vsg->pages[i])) {
-                vm_page_lock_queues();
+                vm_page_lock(page);
                 vm_page_unwire(page, 0);
-                vm_page_unlock_queues();
+                vm_page_unlock(page);
             }
         }
     case dr_via_pages_alloc:
@@ -249,10 +249,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t
         if (m == NULL)
             break;
         vm_page_lock(m);
-        vm_page_lock_queues();
         vm_page_wire(m);
         vm_page_unhold(m);
-        vm_page_unlock_queues();
         vm_page_unlock(m);
         vsg->pages[i] = m;
     }

Modified: user/kmacy/head_page_lock_incr/sys/dev/ti/if_ti.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/dev/ti/if_ti.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/dev/ti/if_ti.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1488,10 +1488,12 @@ ti_newbuf_jumbo(sc, idx, m_old)
         }
         sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
         if (sf[i] == NULL) {
-            vm_page_lock_queues();
+            vm_page_lock(frame);
             vm_page_unwire(frame, 0);
+            vm_page_lock_queues();
             vm_page_free(frame);
             vm_page_unlock_queues();
+            vm_page_unlock(frame);
             device_printf(sc->ti_dev, "buffer allocation "
                 "failed -- packet dropped!\n");
             printf("          index %d page %d\n", idx, i);

Modified: user/kmacy/head_page_lock_incr/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/fs/tmpfs/tmpfs_vnops.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/fs/tmpfs/tmpfs_vnops.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -460,9 +460,9 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
     error = uiomove_fromphys(&m, offset, tlen, uio);
     VM_OBJECT_LOCK(tobj);
 out:
-    vm_page_lock_queues();
+    vm_page_lock(m);
     vm_page_unwire(m, TRUE);
-    vm_page_unlock_queues();
+    vm_page_unlock(m);
     vm_page_wakeup(m);
     vm_object_pip_subtract(tobj, 1);
     VM_OBJECT_UNLOCK(tobj);
@@ -667,14 +667,14 @@ nocache:
 out:
     if (vobj != NULL)
         VM_OBJECT_LOCK(vobj);
-    vm_page_lock_queues();
     if (error == 0) {
         KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
             ("parts of tpg invalid"));
         vm_page_dirty(tpg);
     }
+    vm_page_lock(tpg);
     vm_page_unwire(tpg, TRUE);
-    vm_page_unlock_queues();
+    vm_page_unlock(tpg);
     vm_page_wakeup(tpg);
     if (vpg != NULL)
         vm_page_wakeup(vpg);

Modified: user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -2231,7 +2231,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
     /* entire chunk is free, return it */
     m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
     pmap_qremove((vm_offset_t)pc, 1);
-    vm_page_unwire(m, 0);
+    vm_page_unwire_exclusive(m, 0);
     vm_page_free(m);
     pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 }
@@ -4265,7 +4265,7 @@ pmap_remove_pages(pmap_t pmap)
         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
         pmap_qremove((vm_offset_t)pc, 1);
-        vm_page_unwire(m, 0);
+        vm_page_unwire_exclusive(m, 0);
         vm_page_free(m);
         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
     }

Modified: user/kmacy/head_page_lock_incr/sys/i386/xen/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/i386/xen/pmap.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/i386/xen/pmap.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -2128,7 +2128,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
     TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
     m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
     pmap_qremove((vm_offset_t)pc, 1);
-    vm_page_unwire(m, 0);
+    vm_page_unwire_exclusive(m, 0);
     vm_page_free(m);
     pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 }
@@ -3637,7 +3637,7 @@ pmap_remove_pages(pmap_t pmap)
         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
         pmap_qremove((vm_offset_t)pc, 1);
-        vm_page_unwire(m, 0);
+        vm_page_unwire_exclusive(m, 0);
         vm_page_free(m);
         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
     }

Modified: user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -80,17 +80,20 @@ socow_iodone(void *addr, void *args)
     pp = sf_buf_page(sf);
     sf_buf_free(sf);
     /* remove COW mapping */
-    vm_page_lock_queues();
+    vm_page_lock(pp);
     vm_page_cowclear(pp);
     vm_page_unwire(pp, 0);
+    vm_page_unlock(pp);
     /*
      * Check for the object going away on us. This can
      * happen since we don't hold a reference to it.
      * If so, we're responsible for freeing the page.
      */
-    if (pp->wire_count == 0 && pp->object == NULL)
+    if (pp->wire_count == 0 && pp->object == NULL) {
+        vm_page_lock_queues();
         vm_page_free(pp);
-    vm_page_unlock_queues();
+        vm_page_unlock_queues();
+    }
     socow_stats.iodone++;
 }
 
@@ -131,35 +134,38 @@ socow_setup(struct mbuf *m0, struct uio
     vm_page_lock(pp);
     vm_page_lock_queues();
     if (vm_page_cowsetup(pp) != 0) {
-        vm_page_unhold(pp);
         vm_page_unlock_queues();
+        vm_page_unhold(pp);
         vm_page_unlock(pp);
         return (0);
     }
+    vm_page_unlock_queues();
 
     /*
      * wire the page for I/O
      */
     vm_page_wire(pp);
     vm_page_unhold(pp);
-    vm_page_unlock_queues();
     vm_page_unlock(pp);
     /*
      * Allocate an sf buf
      */
     sf = sf_buf_alloc(pp, SFB_CATCH);
     if (!sf) {
-        vm_page_lock_queues();
+        vm_page_lock(pp);
         vm_page_cowclear(pp);
         vm_page_unwire(pp, 0);
+        vm_page_unlock(pp);
         /*
          * Check for the object going away on us. This can
          * happen since we don't hold a reference to it.
          * If so, we're responsible for freeing the page.
         */
-        if (pp->wire_count == 0 && pp->object == NULL)
+        if (pp->wire_count == 0 && pp->object == NULL) {
+            vm_page_lock_queues();
             vm_page_free(pp);
-        vm_page_unlock_queues();
+            vm_page_unlock_queues();
+        }
         socow_stats.fail_sf_buf++;
         return(0);
     }

Modified: user/kmacy/head_page_lock_incr/sys/kern/uipc_syscalls.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/uipc_syscalls.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/kern/uipc_syscalls.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1715,16 +1715,19 @@ sf_buf_mext(void *addr, void *args)
     m = sf_buf_page(args);
     sf_buf_free(args);
-    vm_page_lock_queues();
+    vm_page_lock(m);
     vm_page_unwire(m, 0);
+    vm_page_unlock(m);
     /*
      * Check for the object going away on us. This can
      * happen since we don't hold a reference to it.
      * If so, we're responsible for freeing the page.
      */
-    if (m->wire_count == 0 && m->object == NULL)
+    if (m->wire_count == 0 && m->object == NULL) {
+        vm_page_lock_queues();
         vm_page_free(m);
-    vm_page_unlock_queues();
+        vm_page_unlock_queues();
+    }
     if (addr == NULL)
         return;
     sfs = addr;
@@ -2108,8 +2111,9 @@ retry_space:
                 mbstat.sf_iocnt++;
             }
             if (error) {
-                vm_page_lock_queues();
+                vm_page_lock(pg);
                 vm_page_unwire(pg, 0);
+                vm_page_unlock(pg);
                 /*
                  * See if anyone else might know about
                  * this page. If not and it is not valid,
@@ -2118,9 +2122,10 @@ retry_space:
                 if (pg->wire_count == 0 && pg->valid == 0 &&
                     pg->busy == 0 && !(pg->oflags & VPO_BUSY) &&
                     pg->hold_count == 0) {
+                    vm_page_lock_queues();
                     vm_page_free(pg);
+                    vm_page_unlock_queues();
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(obj);
                 if (error == EAGAIN)
                     error = 0;    /* not a real error */
@@ -2134,14 +2139,17 @@ retry_space:
             if ((sf = sf_buf_alloc(pg,
                 (mnw ? SFB_NOWAIT : SFB_CATCH))) == NULL) {
                 mbstat.sf_allocfail++;
-                vm_page_lock_queues();
+                vm_page_lock(pg);
                 vm_page_unwire(pg, 0);
+                vm_page_unlock(pg);
                 /*
                  * XXX: Not same check as above!?
                  */
-                if (pg->wire_count == 0 && pg->object == NULL)
+                if (pg->wire_count == 0 && pg->object == NULL) {
+                    vm_page_lock_queues();
                     vm_page_free(pg);
-                vm_page_unlock_queues();
+                    vm_page_unlock_queues();
+                }
                 error = (mnw ? EAGAIN : EINTR);
                 break;
             }

Modified: user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1563,7 +1563,6 @@ vfs_vmio_release(struct buf *bp)
     vm_page_t m;
 
     VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
-    vm_page_lock_queues();
     for (i = 0; i < bp->b_npages; i++) {
         m = bp->b_pages[i];
         bp->b_pages[i] = NULL;
@@ -1571,16 +1570,19 @@ vfs_vmio_release(struct buf *bp)
          * In order to keep page LRU ordering consistent, put
          * everything on the inactive queue.
          */
+        vm_page_lock(m);
         vm_page_unwire(m, 0);
         /*
          * We don't mess with busy pages, it is
          * the responsibility of the process that
          * busied the pages to deal with them.
          */
-        if ((m->oflags & VPO_BUSY) || (m->busy != 0))
+        if ((m->oflags & VPO_BUSY) || (m->busy != 0)) {
+            vm_page_unlock(m);
             continue;
-
+        }
         if (m->wire_count == 0) {
+            vm_page_lock_queues();
             /*
              * Might as well free the page if we can and it has
              * no valid data.  We also free the page if the
@@ -1594,9 +1596,10 @@
             } else if (buf_vm_page_count_severe()) {
                 vm_page_try_to_cache(m);
             }
+            vm_page_unlock_queues();
         }
+        vm_page_unlock(m);
     }
-    vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 
     pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
@@ -2942,7 +2945,6 @@ allocbuf(struct buf *bp, int size)
             vm_page_t m;
 
             VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
-            vm_page_lock_queues();
             for (i = desiredpages; i < bp->b_npages; i++) {
                 /*
                  * the page is not freed here -- it
@@ -2953,12 +2955,13 @@ allocbuf(struct buf *bp, int size)
                 KASSERT(m != bogus_page,
                     ("allocbuf: bogus page found"));
                 while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
-                    vm_page_lock_queues();
+                    ;
 
                 bp->b_pages[i] = NULL;
+                vm_page_lock(m);
                 vm_page_unwire(m, 0);
+                vm_page_unlock(m);
             }
-            vm_page_unlock_queues();
             VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
             pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
                 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
@@ -3030,9 +3033,9 @@ allocbuf(struct buf *bp, int size)
                     /*
                      * We have a good page.
                      */
-                    vm_page_lock_queues();
+                    vm_page_lock(m);
                     vm_page_wire(m);
-                    vm_page_unlock_queues();
+                    vm_page_unlock(m);
                     bp->b_pages[bp->b_npages] = m;
                     ++bp->b_npages;
                 }

Modified: user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -111,12 +111,14 @@ struct zbuf {
 static void
 zbuf_page_free(vm_page_t pp)
 {
-
-    vm_page_lock_queues();
+    vm_page_lock(pp);
     vm_page_unwire(pp, 0);
-    if (pp->wire_count == 0 && pp->object == NULL)
+    if (pp->wire_count == 0 && pp->object == NULL) {
+        vm_page_lock_queues();
         vm_page_free(pp);
-    vm_page_unlock_queues();
+        vm_page_unlock_queues();
+    }
+    vm_page_unlock(pp);
 }
 
 /*
@@ -169,10 +171,8 @@ zbuf_sfbuf_get(struct vm_map *map, vm_of
     if (pp == NULL)
         return (NULL);
     vm_page_lock(pp);
-    vm_page_lock_queues();
     vm_page_wire(pp);
     vm_page_unhold(pp);
-    vm_page_unlock_queues();
     vm_page_unlock(pp);
     sf = sf_buf_alloc(pp, SFB_NOWAIT);
     if (sf == NULL) {

Modified: user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1717,9 +1717,9 @@ swp_pager_force_pagein(vm_object_t objec
     vm_page_lock(m);
     vm_page_lock_queues();
     vm_page_activate(m);
-    vm_page_dirty(m);
     vm_page_unlock_queues();
     vm_page_unlock(m);
+    vm_page_dirty(m);
     vm_page_wakeup(m);
     vm_pager_page_unswapped(m);
     return;

Modified: user/kmacy/head_page_lock_incr/sys/vm/uma_core.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/uma_core.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/uma_core.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1023,8 +1023,8 @@ obj_alloc(uma_zone_t zone, int bytes, u_
         pages--;
         p = TAILQ_LAST(&object->memq, pglist);
         vm_page_lock(p);
-        vm_page_lock_queues();
         vm_page_unwire(p, 0);
+        vm_page_lock_queues();
         vm_page_free(p);
         vm_page_unlock_queues();
         vm_page_unlock(p);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -799,15 +799,11 @@ vnode_locked:
         if (wired && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) {
             vm_page_lock(fs.first_m);
-            vm_page_lock_queues();
             vm_page_wire(fs.first_m);
-            vm_page_unlock_queues();
             vm_page_unlock(fs.first_m);
 
             vm_page_lock(fs.m);
-            vm_page_lock_queues();
             vm_page_unwire(fs.m, FALSE);
-            vm_page_unlock_queues();
             vm_page_unlock(fs.m);
         }
         /*
@@ -959,7 +955,6 @@ vnode_locked:
     vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
     VM_OBJECT_LOCK(fs.object);
     vm_page_lock(fs.m);
-    vm_page_lock_queues();
 
     /*
      * If the page is not wired down, then put it where the pageout daemon
@@ -971,9 +966,10 @@ vnode_locked:
         else
             vm_page_unwire(fs.m, 1);
     } else {
+        vm_page_lock_queues();
         vm_page_activate(fs.m);
+        vm_page_unlock_queues();
     }
-    vm_page_unlock_queues();
     vm_page_unlock(fs.m);
     vm_page_wakeup(fs.m);
 
@@ -1132,9 +1128,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_
             pmap_change_wiring(pmap, va, FALSE);
             if (!fictitious) {
                 vm_page_lock(PHYS_TO_VM_PAGE(pa));
-                vm_page_lock_queues();
                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
-                vm_page_unlock_queues();
                 vm_page_unlock(PHYS_TO_VM_PAGE(pa));
             }
         }
@@ -1281,21 +1275,15 @@ vm_fault_copy_entry(vm_map_t dst_map, vm
         if (upgrade) {
             vm_page_lock(src_m);
-            vm_page_lock_queues();
             vm_page_unwire(src_m, 0);
-            vm_page_unlock_queues();
             vm_page_unlock(src_m);
 
             vm_page_lock(dst_m);
-            vm_page_lock_queues();
             vm_page_wire(dst_m);
-            vm_page_unlock_queues();
             vm_page_unlock(dst_m);
         } else {
             vm_page_lock(dst_m);
-            vm_page_lock_queues();
             vm_page_activate(dst_m);
-            vm_page_unlock_queues();
             vm_page_unlock(dst_m);
         }
 
         vm_page_wakeup(dst_m);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_glue.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_glue.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_glue.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -437,8 +437,8 @@ vm_thread_stack_dispose(vm_object_t ksob
         if (m == NULL)
             panic("vm_thread_dispose: kstack already missing?");
         vm_page_lock(m);
-        vm_page_lock_queues();
         vm_page_unwire(m, 0);
+        vm_page_lock_queues();
         vm_page_free(m);
         vm_page_unlock_queues();
         vm_page_unlock(m);
@@ -529,9 +529,7 @@ vm_thread_swapout(struct thread *td)
             panic("vm_thread_swapout: kstack already missing?");
         vm_page_dirty(m);
         vm_page_lock(m);
-        vm_page_lock_queues();
         vm_page_unwire(m, 0);
-        vm_page_unlock_queues();
         vm_page_unlock(m);
     }
     VM_OBJECT_UNLOCK(ksobj);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_kern.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_kern.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_kern.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -381,8 +381,8 @@ retry:
         m = vm_page_lookup(kmem_object,
             OFF_TO_IDX(offset + i));
         vm_page_lock(m);
-        vm_page_lock_queues();
         vm_page_unwire(m, 0);
+        vm_page_lock_queues();
         vm_page_free(m);
         vm_page_unlock_queues();
         vm_page_unlock(m);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_page.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_page.c    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_page.c    Fri Apr 30 06:43:35 2010    (r207425)
@@ -1545,12 +1545,15 @@ vm_page_wire(vm_page_t m)
      * and only unqueue the page if it is on some queue (if it is unmanaged
      * it is already off the queues).
      */
-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     if (m->flags & PG_FICTITIOUS)
         return;
+    vm_page_lock_assert(m, MA_OWNED);
     if (m->wire_count == 0) {
-        if ((m->flags & PG_UNMANAGED) == 0)
+        if ((m->flags & PG_UNMANAGED) == 0) {
+            vm_page_lock_queues();
             vm_pageq_remove(m);
+            vm_page_unlock_queues();
+        }
         atomic_add_int(&cnt.v_wire_count, 1);
     }
     m->wire_count++;
@@ -1586,31 +1589,46 @@ vm_page_wire(vm_page_t m)
  * This routine may not block.
  */
 
 void
-vm_page_unwire(vm_page_t m, int activate)
+vm_page_unwire_exclusive(vm_page_t m, int activate)
 {
+    boolean_t locked;
 
-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     if (m->flags & PG_FICTITIOUS)
         return;
     if (m->wire_count > 0) {
         m->wire_count--;
         if (m->wire_count == 0) {
             atomic_subtract_int(&cnt.v_wire_count, 1);
+
             if (m->flags & PG_UNMANAGED) {
-                ;
-            } else if (activate)
+                locked = FALSE;
+            } else if (activate) {
+                locked = TRUE;
+                vm_page_lock_queues();
                 vm_page_enqueue(PQ_ACTIVE, m);
-            else {
+            } else {
+                locked = TRUE;
+                vm_page_lock_queues();
                 vm_page_flag_clear(m, PG_WINATCFLS);
                 vm_page_enqueue(PQ_INACTIVE, m);
             }
+            if (locked)
+                vm_page_unlock_queues();
         }
     } else {
         panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
     }
 }
 
+void
+vm_page_unwire(vm_page_t m, int activate)
+{
+
+    vm_page_lock_assert(m, MA_OWNED);
+    vm_page_unwire_exclusive(m, activate);
+}
+
 /*
  * Move the specified page to the inactive queue.  If the page has
  * any associated swap, the swap is deallocated.
@@ -1902,9 +1920,9 @@ retrylookup:
             goto retrylookup;
         } else {
             if ((allocflags & VM_ALLOC_WIRED) != 0) {
-                vm_page_lock_queues();
+                vm_page_lock(m);
                 vm_page_wire(m);
-                vm_page_unlock_queues();
+                vm_page_unlock(m);
             }
             if ((allocflags & VM_ALLOC_NOBUSY) == 0)
                 vm_page_busy(m);
@@ -2254,7 +2272,7 @@ void
 vm_page_cowclear(vm_page_t m)
 {
 
-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    vm_page_lock_assert(m, MA_OWNED);
     if (m->cow) {
         m->cow--;
         /*
@@ -2270,6 +2288,7 @@ int
 vm_page_cowsetup(vm_page_t m)
 {
 
+    vm_page_lock_assert(m, MA_OWNED);
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     if (m->cow == USHRT_MAX - 1)
         return (EBUSY);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_page.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_page.h    Fri Apr 30 06:32:37 2010    (r207424)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_page.h    Fri Apr 30 06:43:35 2010    (r207425)
@@ -359,6 +359,7 @@ void vm_page_sleep(vm_page_t m, const ch
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
 void vm_page_unwire (vm_page_t, int);
+void vm_page_unwire_exclusive(vm_page_t, int);
 void vm_page_wire (vm_page_t);
 void vm_page_set_validclean (vm_page_t, int, int);
 void vm_page_clear_dirty (vm_page_t, int, int);
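For readers following the page-lock incremental work, the caller-side
protocol this commit converges on is sketched below.  This is an
illustrative summary, not part of the patch; `m' stands for any managed
vm_page_t, and error handling is elided.

    /*
     * Wiring: callers take the per-page lock rather than the global
     * page queue mutex.  vm_page_wire() asserts the page lock and
     * acquires the queue mutex internally, only around the
     * vm_pageq_remove() call.
     */
    vm_page_lock(m);
    vm_page_wire(m);
    vm_page_unlock(m);

    /*
     * Unwiring: vm_page_unwire() likewise asserts the page lock; if
     * the wire count drops to zero it requeues the page (PQ_ACTIVE
     * or PQ_INACTIVE) under a queue mutex it takes itself.  Freeing
     * still requires the queue mutex in this branch, so callers that
     * may free after unwiring take it explicitly.
     */
    vm_page_lock(m);
    vm_page_unwire(m, 0);
    if (m->wire_count == 0 && m->object == NULL) {
        vm_page_lock_queues();
        vm_page_free(m);
        vm_page_unlock_queues();
    }
    vm_page_unlock(m);

    /*
     * vm_page_unwire_exclusive() is the variant for pages with a
     * known exclusive owner (e.g. the pmap pv-chunk pages above),
     * where no page lock is asserted.
     */
    vm_page_unwire_exclusive(m, 0);
    vm_page_free(m);

The boolean_t dance in vm_page_unwire_exclusive() keeps the queue mutex
scoped to the enqueue operation itself, which is the point of the
exercise: the global mutex is held only for queue manipulation, not for
the whole wire/unwire window.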