Date: Mon, 16 Jun 2014 18:15:27 +0000 (UTC)
From: Attilio Rao <attilio@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r267548 - in head/sys: amd64/amd64 arm/arm dev/agp dev/cxgbe/tom dev/drm dev/drm2/i915 dev/drm2/ttm dev/ti dev/virtio/balloon dev/xen/balloon i386/i386 i386/xen ia64/ia64 kern mips/mips...
Message-ID: <201406161815.s5GIFR2k043096@svn.freebsd.org>
Author: attilio
Date: Mon Jun 16 18:15:27 2014
New Revision: 267548
URL: http://svnweb.freebsd.org/changeset/base/267548

Log:
  - Modify vm_page_unwire() and vm_page_enqueue() to directly accept the
    queue into which pages being unwired should be enqueued.
  - Add stronger checks to the pagequeue enqueue/dequeue paths when adding
    pages to and removing pages from them.

  Of course, for unmanaged pages the queue parameter of vm_page_unwire() is
  ignored, just as the "activate" parameter is today.  This makes adding new
  pagequeues quicker.

  This change effectively modifies the KPI.  __FreeBSD_version will,
  however, only be bumped when the full cache of free pages is evicted.

  Sponsored by:	EMC / Isilon storage division
  Reviewed by:	alc
  Tested by:	pho

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/dev/agp/agp.c
  head/sys/dev/agp/agp_i810.c
  head/sys/dev/cxgbe/tom/t4_ddp.c
  head/sys/dev/drm/via_dmablit.c
  head/sys/dev/drm2/i915/i915_gem.c
  head/sys/dev/drm2/i915/i915_gem_gtt.c
  head/sys/dev/drm2/ttm/ttm_page_alloc.c
  head/sys/dev/ti/if_ti.c
  head/sys/dev/virtio/balloon/virtio_balloon.c
  head/sys/dev/xen/balloon/balloon.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/kern/uipc_syscalls.c
  head/sys/kern/vfs_bio.c
  head/sys/mips/mips/pmap.c
  head/sys/net/bpf_zerocopy.c
  head/sys/vm/uma_core.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_kern.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/amd64/amd64/pmap.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -2868,7 +2868,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 	dump_drop_page(m->phys_addr);
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 }

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/arm/arm/pmap-v6.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -4222,7 +4222,7 @@ pmap_free_pv_chunk(struct pv_chunk *pc)
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 	pmap_qremove((vm_offset_t)pc, 1);
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);

Modified: head/sys/dev/agp/agp.c
==============================================================================
--- head/sys/dev/agp/agp.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/agp/agp.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -629,7 +629,7 @@ bad:
 		if (k >= i)
 			vm_page_xunbusy(m);
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(mem->am_obj);
@@ -663,7 +663,7 @@ agp_generic_unbind_memory(device_t dev,
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(mem->am_obj);

Modified: head/sys/dev/agp/agp_i810.c
==============================================================================
--- head/sys/dev/agp/agp_i810.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/agp/agp_i810.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -2009,7 +2009,7 @@ agp_i810_free_memory(device_t dev, struc
 		VM_OBJECT_WLOCK(mem->am_obj);
 		m = vm_page_lookup(mem->am_obj, 0);
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 		VM_OBJECT_WUNLOCK(mem->am_obj);
 	} else {

Modified: head/sys/dev/cxgbe/tom/t4_ddp.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_ddp.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/cxgbe/tom/t4_ddp.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -869,7 +869,7 @@ unwire_ddp_buffer(struct ddp_buffer *db)
 	for (i = 0; i < db->npages; i++) {
 		p = db->pages[i];
 		vm_page_lock(p);
-		vm_page_unwire(p, 0);
+		vm_page_unwire(p, PQ_INACTIVE);
 		vm_page_unlock(p);
 	}
 }

Modified: head/sys/dev/drm/via_dmablit.c
==============================================================================
--- head/sys/dev/drm/via_dmablit.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/drm/via_dmablit.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -179,7 +179,7 @@ via_free_sg_info(drm_via_sg_info_t *vsg)
 		for (i=0; i < vsg->num_pages; ++i) {
 			page = vsg->pages[i];
 			vm_page_lock(page);
-			vm_page_unwire(page, 0);
+			vm_page_unwire(page, PQ_INACTIVE);
 			vm_page_unlock(page);
 		}
 	case dr_via_pages_alloc:

Modified: head/sys/dev/drm2/i915/i915_gem.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/drm2/i915/i915_gem.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1039,7 +1039,7 @@ i915_gem_swap_io(struct drm_device *dev,
 		vm_page_dirty(m);
 		vm_page_reference(m);
 		vm_page_lock(m);
-		vm_page_unwire(m, 1);
+		vm_page_unwire(m, PQ_ACTIVE);
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
@@ -2247,7 +2247,7 @@ failed:
 	for (j = 0; j < i; j++) {
 		m = obj->pages[j];
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
@@ -2308,7 +2308,7 @@ i915_gem_object_put_pages_gtt(struct drm
 		if (obj->madv == I915_MADV_WILLNEED)
 			vm_page_reference(m);
 		vm_page_lock(m);
-		vm_page_unwire(obj->pages[i], 1);
+		vm_page_unwire(obj->pages[i], PQ_ACTIVE);
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
@@ -3611,7 +3611,7 @@ i915_gem_detach_phys_object(struct drm_d
 		vm_page_reference(m);
 		vm_page_lock(m);
 		vm_page_dirty(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
@@ -3676,7 +3676,7 @@ i915_gem_attach_phys_object(struct drm_d
 		vm_page_reference(m);
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}

Modified: head/sys/dev/drm2/i915/i915_gem_gtt.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_gtt.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/drm2/i915/i915_gem_gtt.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -206,7 +206,7 @@ i915_gem_cleanup_aliasing_ppgtt(struct d
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		m = ppgtt->pt_pages[i];
 		if (m != NULL) {
-			vm_page_unwire(m, 0);
+			vm_page_unwire(m, PQ_INACTIVE);
 			vm_page_free(m);
 		}
 	}

Modified: head/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -139,7 +139,7 @@ ttm_vm_page_free(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
 	m->flags &= ~PG_FICTITIOUS;
 	m->oflags |= VPO_UNMANAGED;
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 }

Modified: head/sys/dev/ti/if_ti.c
==============================================================================
--- head/sys/dev/ti/if_ti.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/ti/if_ti.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1616,7 +1616,7 @@ ti_newbuf_jumbo(struct ti_softc *sc, int
 		}
 		sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
 		if (sf[i] == NULL) {
-			vm_page_unwire(frame, 0);
+			vm_page_unwire(frame, PQ_INACTIVE);
 			vm_page_free(frame);
 			device_printf(sc->ti_dev, "buffer allocation "
 			    "failed -- packet dropped!\n");

Modified: head/sys/dev/virtio/balloon/virtio_balloon.c
==============================================================================
--- head/sys/dev/virtio/balloon/virtio_balloon.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/virtio/balloon/virtio_balloon.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -450,7 +450,7 @@
 static void
 vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m)
 {
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 	sc->vtballoon_current_npages--;
 }

Modified: head/sys/dev/xen/balloon/balloon.c
==============================================================================
--- head/sys/dev/xen/balloon/balloon.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/dev/xen/balloon/balloon.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -255,7 +255,7 @@ increase_reservation(unsigned long nr_pa
 		set_phys_to_machine(pfn, frame_list[i]);
-		vm_page_unwire(page, 0);
+		vm_page_unwire(page, PQ_INACTIVE);
 		vm_page_free(page);
 	}
@@ -297,7 +297,7 @@ decrease_reservation(unsigned long nr_pa
 		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 		if (balloon_append(page) != 0) {
-			vm_page_unwire(page, 0);
+			vm_page_unwire(page, PQ_INACTIVE);
 			vm_page_free(page);
 			nr_pages = i;

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/i386/i386/pmap.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -2368,7 +2368,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 	pmap_qremove((vm_offset_t)pc, 1);
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 }

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/i386/xen/pmap.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -2144,7 +2144,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 	pmap_qremove((vm_offset_t)pc, 1);
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 }

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/ia64/ia64/pmap.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -932,7 +932,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	PV_STAT(pc_chunk_frees++);
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((vm_offset_t)pc));
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 }

Modified: head/sys/kern/uipc_syscalls.c
==============================================================================
--- head/sys/kern/uipc_syscalls.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/kern/uipc_syscalls.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1996,7 +1996,7 @@ sf_buf_mext(struct mbuf *mb, void *addr,
 	m = sf_buf_page(args);
 	sf_buf_free(args);
 	vm_page_lock(m);
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	/*
 	 * Check for the object going away on us.  This can
 	 * happen since we don't hold a reference to it.
@@ -2692,7 +2692,7 @@ sendfile_readpage(vm_object_t obj, struc
 	} else if (m != NULL) {
 free_page:
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		/*
 		 * See if anyone else might know about this page.  If
@@ -3050,7 +3050,7 @@ retry_space:
 			if (sf == NULL) {
 				SFSTAT_INC(sf_allocfail);
 				vm_page_lock(pg);
-				vm_page_unwire(pg, 0);
+				vm_page_unwire(pg, PQ_INACTIVE);
 				KASSERT(pg->object != NULL,
 				    ("%s: object disappeared", __func__));
 				vm_page_unlock(pg);

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/kern/vfs_bio.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1896,7 +1896,7 @@ vfs_vmio_release(struct buf *bp)
 		 * everything on the inactive queue.
 		 */
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		/*
 		 * Might as well free the page if we can and it has
@@ -3483,7 +3483,7 @@ allocbuf(struct buf *bp, int size)
 				bp->b_pages[i] = NULL;
 				vm_page_lock(m);
-				vm_page_unwire(m, 0);
+				vm_page_unwire(m, PQ_INACTIVE);
 				vm_page_unlock(m);
 			}
 			VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/mips/mips/pmap.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1535,7 +1535,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	PV_STAT(pc_chunk_frees++);
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
-	vm_page_unwire(m, 0);
+	vm_page_unwire(m, PQ_INACTIVE);
 	vm_page_free(m);
 }

Modified: head/sys/net/bpf_zerocopy.c
==============================================================================
--- head/sys/net/bpf_zerocopy.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/net/bpf_zerocopy.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -114,7 +114,7 @@ zbuf_page_free(vm_page_t pp)
 {
 
 	vm_page_lock(pp);
-	vm_page_unwire(pp, 0);
+	vm_page_unwire(pp, PQ_INACTIVE);
 	if (pp->wire_count == 0 && pp->object == NULL)
 		vm_page_free(pp);
 	vm_page_unlock(pp);

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/uma_core.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -1154,7 +1154,7 @@ noobj_alloc(uma_zone_t zone, int bytes,
 		 * exit.
 		 */
 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
-			vm_page_unwire(p, 0);
+			vm_page_unwire(p, PQ_INACTIVE);
 			vm_page_free(p);
 		}
 		return (NULL);

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/vm_fault.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -755,7 +755,7 @@ vnode_locked:
 			vm_page_unlock(fs.first_m);
 			vm_page_lock(fs.m);
-			vm_page_unwire(fs.m, FALSE);
+			vm_page_unwire(fs.m, PQ_INACTIVE);
 			vm_page_unlock(fs.m);
 		}
 		/*
@@ -917,7 +917,7 @@ vnode_locked:
 		if (wired)
 			vm_page_wire(fs.m);
 		else
-			vm_page_unwire(fs.m, 1);
+			vm_page_unwire(fs.m, PQ_ACTIVE);
 	} else
 		vm_page_activate(fs.m);
 	if (m_hold != NULL) {
@@ -1208,7 +1208,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_
 		if (!fictitious) {
 			m = PHYS_TO_VM_PAGE(pa);
 			vm_page_lock(m);
-			vm_page_unwire(m, TRUE);
+			vm_page_unwire(m, PQ_ACTIVE);
 			vm_page_unlock(m);
 		}
 	}
@@ -1390,7 +1390,7 @@ again:
 		if (upgrade) {
 			if (src_m != dst_m) {
 				vm_page_lock(src_m);
-				vm_page_unwire(src_m, 0);
+				vm_page_unwire(src_m, PQ_INACTIVE);
 				vm_page_unlock(src_m);
 				vm_page_lock(dst_m);
 				vm_page_wire(dst_m);

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/vm_glue.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -418,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksob
 		if (m == NULL)
 			panic("vm_thread_dispose: kstack already missing?");
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_free(m);
 		vm_page_unlock(m);
 	}
@@ -507,7 +507,7 @@ vm_thread_swapout(struct thread *td)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(ksobj);

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/vm_kern.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -193,7 +193,7 @@ retry:
 			i -= PAGE_SIZE;
 			m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
-			vm_page_unwire(m, 0);
+			vm_page_unwire(m, PQ_INACTIVE);
 			vm_page_free(m);
 		}
 		vmem_free(vmem, addr, size);
@@ -367,7 +367,7 @@ retry:
 			i -= PAGE_SIZE;
 			m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
-			vm_page_unwire(m, 0);
+			vm_page_unwire(m, PQ_INACTIVE);
 			vm_page_free(m);
 		}
 		VM_OBJECT_WUNLOCK(object);
@@ -401,7 +401,7 @@ kmem_unback(vm_object_t object, vm_offse
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
-		vm_page_unwire(m, 0);
+		vm_page_unwire(m, PQ_INACTIVE);
 		vm_page_free(m);
 	}
 	VM_OBJECT_WUNLOCK(object);

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/vm_page.c	Mon Jun 16 18:15:27 2014	(r267548)
@@ -147,7 +147,7 @@ static uma_zone_t fakepg_zone;
 static struct vnode *vm_page_alloc_init(vm_page_t m);
 static void vm_page_cache_turn_free(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
-static void vm_page_enqueue(int queue, vm_page_t m);
+static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
 	vm_pindex_t pindex, vm_page_t mpred);
@@ -2029,8 +2029,8 @@ vm_page_dequeue(vm_page_t m)
 	struct vm_pagequeue *pq;
 
 	vm_page_assert_locked(m);
-	KASSERT(m->queue == PQ_ACTIVE || m->queue == PQ_INACTIVE,
-	    ("vm_page_dequeue: page %p is not queued", m));
+	KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued",
+	    m));
 	pq = vm_page_pagequeue(m);
 	vm_pagequeue_lock(pq);
 	m->queue = PQ_NONE;
@@ -2067,11 +2067,14 @@ vm_page_dequeue_locked(vm_page_t m)
 *	The page must be locked.
 */
 static void
-vm_page_enqueue(int queue, vm_page_t m)
+vm_page_enqueue(uint8_t queue, vm_page_t m)
 {
 	struct vm_pagequeue *pq;
 
 	vm_page_lock_assert(m, MA_OWNED);
+	KASSERT(queue < PQ_COUNT,
+	    ("vm_page_enqueue: invalid queue %u request for page %p",
+	    queue, m));
 	pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
 	vm_pagequeue_lock(pq);
 	m->queue = queue;
@@ -2330,9 +2333,7 @@ vm_page_wire(vm_page_t m)
 *
 *	Release one wiring of the specified page, potentially enabling it to be
 *	paged again.  If paging is enabled, then the value of the parameter
- *	"activate" determines to which queue the page is added.  If "activate" is
- *	non-zero, then the page is added to the active queue.  Otherwise, it is
- *	added to the inactive queue.
+ *	"queue" determines the queue to which the page is added.
 *
 *	However, unless the page belongs to an object, it is not enqueued because
 *	it cannot be paged out.
@@ -2342,9 +2343,12 @@ vm_page_wire(vm_page_t m)
 *	A managed page must be locked.
 */
 void
-vm_page_unwire(vm_page_t m, int activate)
+vm_page_unwire(vm_page_t m, uint8_t queue)
 {
 
+	KASSERT(queue < PQ_COUNT,
+	    ("vm_page_unwire: invalid queue %u request for page %p",
+	    queue, m));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_lock_assert(m, MA_OWNED);
 	if ((m->flags & PG_FICTITIOUS) != 0) {
@@ -2359,9 +2363,9 @@ vm_page_unwire(vm_page_t m, int activate
 			if ((m->oflags & VPO_UNMANAGED) != 0 ||
 			    m->object == NULL)
 				return;
-			if (!activate)
+			if (queue == PQ_INACTIVE)
 				m->flags &= ~PG_WINATCFLS;
-			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
+			vm_page_enqueue(queue, m);
 		}
 	} else
 		panic("vm_page_unwire: page %p's wire count is zero", m);

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Mon Jun 16 18:14:05 2014	(r267547)
+++ head/sys/vm/vm_page.h	Mon Jun 16 18:15:27 2014	(r267548)
@@ -465,7 +465,7 @@ vm_offset_t vm_page_startup(vm_offset_t
 void vm_page_sunbusy(vm_page_t m);
 int vm_page_trysbusy(vm_page_t m);
 void vm_page_unhold_pages(vm_page_t *ma, int count);
-void vm_page_unwire (vm_page_t, int);
+void vm_page_unwire (vm_page_t m, uint8_t queue);
 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_wire (vm_page_t);
 void vm_page_xunbusy_hard(vm_page_t m);
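As a caller-side illustration of the new KPI, the minimal sketch below shows how the old boolean "activate" argument maps onto the explicit pagequeue argument. my_driver_release_page() is a hypothetical consumer and not part of this commit; vm_page_lock(), vm_page_unwire(), vm_page_free() and the PQ_* constants are the interfaces touched by the diff above.

/*
 * Hypothetical caller updated for the r267548 vm_page_unwire() KPI:
 * the second argument now names the target pagequeue explicitly
 * instead of acting as an "activate" boolean.
 */
static void
my_driver_release_page(vm_page_t m, int referenced)
{

	vm_page_lock(m);
	/* Previously: vm_page_unwire(m, referenced ? 1 : 0); */
	vm_page_unwire(m, referenced ? PQ_ACTIVE : PQ_INACTIVE);
	if (m->wire_count == 0 && m->object == NULL)
		vm_page_free(m);
	vm_page_unlock(m);
}

For unmanaged pages the queue argument is ignored, as the log message notes, so existing callers only need the mechanical 0 -> PQ_INACTIVE and 1 -> PQ_ACTIVE substitution seen throughout the diff.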