Date: Mon, 26 May 2014 09:48:57 +0000 (UTC)
From: Attilio Rao <attilio@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r266683 - in user/attilio/rm_vmobj_cache/sys: amd64/amd64 arm/arm dev/drm2/i915 dev/drm2/ttm dev/ti dev/virtio/balloon dev/xen/balloon i386/i386 i386/xen ia64/ia64 mips/mips powerpc/aim...
Message-ID: <201405260948.s4Q9mvaR009596@svn.freebsd.org>
Author: attilio
Date: Mon May 26 09:48:57 2014
New Revision: 266683
URL: http://svnweb.freebsd.org/changeset/base/266683

Log:
  Allow vm_page_free_toq() to successfully return pages to the freelist
  in the case where they are unmanaged and have a wire_count == 1.  In
  that case, vm_page_free_toq() will also take care of decreasing the
  total count of wired pages.

  This drastically reduces the number of calls to vm_page_unwire() and
  the amount of manual wire_count frobbing.

  Suggested by:	alc

Modified:
  user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c
  user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/arm/arm/pmap-v6.c
  user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c
  user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_bo.c
  user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c
  user/attilio/rm_vmobj_cache/sys/dev/ti/if_ti.c
  user/attilio/rm_vmobj_cache/sys/dev/virtio/balloon/virtio_balloon.c
  user/attilio/rm_vmobj_cache/sys/dev/xen/balloon/balloon.c
  user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c
  user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c
  user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c
  user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c
  user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c
  user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c
  user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c
  user/attilio/rm_vmobj_cache/sys/vm/uma_core.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_object.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_page.c
  user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c
  user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_utils.c

Modified: user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -2012,7 +2012,17 @@ pmap_free_zero_pages(struct spglist *fre while ((m = SLIST_FIRST(free)) != NULL) { SLIST_REMOVE_HEAD(free, plinks.s.ss); - /* Preserve the page's PG_ZERO setting. */ + + /* + * Preserve the page's PG_ZERO setting. + * However, as the pages are unmanaged, fix-up the wired count + * to perform a correct free.
+ */ + if (m->wire_count != 0) + panic("pmap_free_zero_pages: wrong wire count %u for page %p", + m->wire_count, m); + m->wire_count = 1; + atomic_add_int(&vm_cnt.v_wire_count, 1); vm_page_free_toq(m); } } @@ -2329,8 +2339,6 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t /* Have to allocate a new pdp, recurse */ if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, lockp) == NULL) { - --m->wire_count; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } @@ -2362,8 +2370,6 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { - --m->wire_count; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } @@ -2376,9 +2382,6 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { - --m->wire_count; - atomic_subtract_int(&vm_cnt.v_wire_count, - 1); vm_page_free_zero(m); return (NULL); } @@ -2516,8 +2519,6 @@ pmap_release(pmap_t pmap) pmap->pm_pml4[DMPML4I + i] = 0; pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ - m->wire_count--; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); if (pmap->pm_pcid != -1) free_unr(&pcid_unr, pmap->pm_pcid); @@ -2815,6 +2816,9 @@ reclaim_pv_chunk(pmap_t locked_pmap, str m_pc = SLIST_FIRST(&free); SLIST_REMOVE_HEAD(&free, plinks.s.ss); /* Recycle a freed page table page. */ + KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0, + ("reclaim_pv_chunk: recycled page table page %p not unmanaged", + m_pc)); m_pc->wire_count = 1; atomic_add_int(&vm_cnt.v_wire_count, 1); } @@ -2868,7 +2872,6 @@ free_pv_chunk(struct pv_chunk *pc) /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m->phys_addr); - vm_page_unwire(m, 0); vm_page_free(m); } Modified: user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c Mon May 26 09:48:57 2014 (r266683) @@ -78,7 +78,5 @@ uma_small_free(void *mem, int size, u_in pa = DMAP_TO_PHYS((vm_offset_t)mem); dump_drop_page(pa); m = PHYS_TO_VM_PAGE(pa); - m->wire_count--; vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); } Modified: user/attilio/rm_vmobj_cache/sys/arm/arm/pmap-v6.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/arm/arm/pmap-v6.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/arm/arm/pmap-v6.c Mon May 26 09:48:57 2014 (r266683) @@ -4223,7 +4223,6 @@ pmap_free_pv_chunk(struct pv_chunk *pc) /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); pmap_qremove((vm_offset_t)pc, 1); - vm_page_unwire(m, 0); vm_page_free(m); pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); Modified: user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c Mon May 26 09:48:57 2014 (r266683) @@ -205,10 +205,8 @@ i915_gem_cleanup_aliasing_ppgtt(struct d for (i = 0; i < ppgtt->num_pd_entries; i++) { m = ppgtt->pt_pages[i]; - if (m != NULL) { - 
vm_page_unwire(m, 0); + if (m != NULL) vm_page_free(m); - } } free(ppgtt->pt_pages, DRM_I915_GEM); free(ppgtt, DRM_I915_GEM); Modified: user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_bo.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_bo.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_bo.c Mon May 26 09:48:57 2014 (r266683) @@ -1472,7 +1472,6 @@ static void ttm_bo_global_kobj_release(s { ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); - vm_page_unwire(glob->dummy_read_page, 0); vm_page_free(glob->dummy_read_page); } @@ -1519,7 +1518,6 @@ int ttm_bo_global_init(struct drm_global return (0); out_no_shrink: - vm_page_unwire(glob->dummy_read_page, 0); vm_page_free(glob->dummy_read_page); out_no_drp: free(glob, M_DRM_GLOBAL); Modified: user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c Mon May 26 09:48:57 2014 (r266683) @@ -139,7 +139,6 @@ ttm_vm_page_free(vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m)); m->flags &= ~PG_FICTITIOUS; m->oflags |= VPO_UNMANAGED; - vm_page_unwire(m, 0); vm_page_free(m); } Modified: user/attilio/rm_vmobj_cache/sys/dev/ti/if_ti.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/ti/if_ti.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/ti/if_ti.c Mon May 26 09:48:57 2014 (r266683) @@ -1616,7 +1616,6 @@ ti_newbuf_jumbo(struct ti_softc *sc, int } sf[i] = sf_buf_alloc(frame, SFB_NOWAIT); if (sf[i] == NULL) { - vm_page_unwire(frame, 0); vm_page_free(frame); device_printf(sc->ti_dev, "buffer allocation " "failed -- packet dropped!\n"); Modified: user/attilio/rm_vmobj_cache/sys/dev/virtio/balloon/virtio_balloon.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/virtio/balloon/virtio_balloon.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/virtio/balloon/virtio_balloon.c Mon May 26 09:48:57 2014 (r266683) @@ -450,7 +450,6 @@ static void vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m) { - vm_page_unwire(m, 0); vm_page_free(m); sc->vtballoon_current_npages--; } Modified: user/attilio/rm_vmobj_cache/sys/dev/xen/balloon/balloon.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/dev/xen/balloon/balloon.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/dev/xen/balloon/balloon.c Mon May 26 09:48:57 2014 (r266683) @@ -255,7 +255,6 @@ increase_reservation(unsigned long nr_pa set_phys_to_machine(pfn, frame_list[i]); - vm_page_unwire(page, 0); vm_page_free(page); } @@ -297,7 +296,6 @@ decrease_reservation(unsigned long nr_pa set_phys_to_machine(pfn, INVALID_P2M_ENTRY); if (balloon_append(page) != 0) { - vm_page_unwire(page, 0); vm_page_free(page); nr_pages = i; Modified: user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c Mon May 26 09:48:57 2014 (r266683) 
@@ -1578,7 +1578,17 @@ pmap_free_zero_pages(struct spglist *fre while ((m = SLIST_FIRST(free)) != NULL) { SLIST_REMOVE_HEAD(free, plinks.s.ss); - /* Preserve the page's PG_ZERO setting. */ + + /* + * Preserve the page's PG_ZERO setting. + * However, as the pages are unmanaged, fix-up the wired count + * to perform a correct free. + */ + if (m->wire_count != 0) + panic("pmap_free_zero_pages: wrong wire count %u for page %p", + m->wire_count, m); + m->wire_count = 1; + atomic_add_int(&vm_cnt.v_wire_count, 1); vm_page_free_toq(m); } } @@ -2049,8 +2059,6 @@ pmap_release(pmap_t pmap) KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), ("pmap_release: got wrong ptd page")); #endif - m->wire_count--; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } } @@ -2312,6 +2320,9 @@ out: m_pc = SLIST_FIRST(&free); SLIST_REMOVE_HEAD(&free, plinks.s.ss); /* Recycle a freed page table page. */ + KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0, + ("pmap_pv_reclaim: recycled page table page %p not unmanaged", + m_pc)); m_pc->wire_count = 1; atomic_add_int(&vm_cnt.v_wire_count, 1); } @@ -2368,7 +2379,6 @@ free_pv_chunk(struct pv_chunk *pc) /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); pmap_qremove((vm_offset_t)pc, 1); - vm_page_unwire(m, 0); vm_page_free(m); pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); } Modified: user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -1336,6 +1336,16 @@ pmap_free_zero_pages(vm_page_t free) m = free; free = (void *)m->object; m->object = NULL; + + /* + * As the pages are unmanaged, fix-up the wired count + * to perform a correct free. + */ + if (m->wire_count != 0) + panic("pmap_free_zero_pages: wrong wire count %u for page %p", + m->wire_count, m); + m->wire_count = 1; + atomic_add_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } } @@ -1812,8 +1822,6 @@ pmap_release(pmap_t pmap) KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), ("pmap_release: got wrong ptd page")); #endif - m->wire_count--; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free(m); } #ifdef PAE @@ -2088,6 +2096,9 @@ out: m_pc = free; free = (void *)m_pc->object; /* Recycle a freed page table page. 
*/ + KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0, + ("pmap_pv_reclaim: recycled page table page %p not unmanaged", + m_pc)); m_pc->wire_count = 1; atomic_add_int(&vm_cnt.v_wire_count, 1); } @@ -2144,7 +2155,6 @@ free_pv_chunk(struct pv_chunk *pc) /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); pmap_qremove((vm_offset_t)pc, 1); - vm_page_unwire(m, 0); vm_page_free(m); pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); } Modified: user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -931,7 +931,6 @@ free_pv_chunk(struct pv_chunk *pc) PV_STAT(pc_chunk_frees++); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(IA64_RR_MASK((vm_offset_t)pc)); - vm_page_unwire(m, 0); vm_page_free(m); } Modified: user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c Mon May 26 09:48:57 2014 (r266683) @@ -71,7 +71,5 @@ uma_small_free(void *mem, int size, u_in vm_page_t m; m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem)); - m->wire_count--; vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); } Modified: user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -993,9 +993,14 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_ /* * If the page is finally unwired, simply free it. + * Fix-up the wire_count value to make the function to perform + * the free correctly. 
*/ + if (m->wire_count != 0) + panic("_pmap_unwire_ptp: invalid wire count %u for the page %p", + m->wire_count, m); + ++m->wire_count; vm_page_free_zero(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); } /* @@ -1142,8 +1147,6 @@ _pmap_allocpte(pmap_t pmap, unsigned pte if (_pmap_allocpte(pmap, NUPDE + segindex, flags) == NULL) { /* alloc failed, release current */ - --m->wire_count; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } @@ -1225,8 +1228,6 @@ pmap_release(pmap_t pmap) ptdva = (vm_offset_t)pmap->pm_segtab; ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva)); - ptdpg->wire_count--; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(ptdpg); } @@ -1534,7 +1535,6 @@ free_pv_chunk(struct pv_chunk *pc) PV_STAT(pc_chunk_frees++); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc)); - vm_page_unwire(m, 0); vm_page_free(m); } Modified: user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c Mon May 26 09:48:57 2014 (r266683) @@ -77,7 +77,5 @@ uma_small_free(void *mem, int size, u_in pa = MIPS_DIRECT_TO_PHYS((vm_offset_t)mem); m = PHYS_TO_VM_PAGE(pa); - m->wire_count--; vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); } Modified: user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c Mon May 26 09:48:57 2014 (r266683) @@ -91,8 +91,6 @@ uma_small_free(void *mem, int size, u_in (vm_offset_t)mem + PAGE_SIZE); m = PHYS_TO_VM_PAGE((vm_offset_t)mem); - m->wire_count--; vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); atomic_subtract_int(&hw_uma_mdpages, 1); } Modified: user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -648,8 +648,13 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsign va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); pa = pte_vatopa(mmu, kernel_pmap, va); m = PHYS_TO_VM_PAGE(pa); + + /* Fix-up the wire_count to make free perform correctly. 
*/ + if (m->wire_count != 0) + panic("ptbl_free: invalid wire count %u for page %p", + m->wire_count, m); + ++m->wire_count; vm_page_free_zero(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); mmu_booke_kremove(mmu, va); } Modified: user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c Mon May 26 09:48:57 2014 (r266683) @@ -1292,8 +1292,6 @@ pmap_release(pmap_t pm) while (!TAILQ_EMPTY(&obj->memq)) { m = TAILQ_FIRST(&obj->memq); m->md.pmap = NULL; - m->wire_count--; - atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } VM_OBJECT_WUNLOCK(obj); Modified: user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c Mon May 26 09:48:57 2014 (r266683) @@ -546,7 +546,5 @@ uma_small_free(void *mem, int size, u_in PMAP_STATS_INC(uma_nsmall_free); m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem)); - m->wire_count--; vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); } Modified: user/attilio/rm_vmobj_cache/sys/vm/uma_core.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/vm/uma_core.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/vm/uma_core.c Mon May 26 09:48:57 2014 (r266683) @@ -1150,10 +1150,8 @@ noobj_alloc(uma_zone_t zone, int bytes, * Page allocation failed, free intermediate pages and * exit. */ - TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { - vm_page_unwire(p, 0); + TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) vm_page_free(p); - } return (NULL); } *flags = UMA_SLAB_PRIV; Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c Mon May 26 09:48:57 2014 (r266683) @@ -193,7 +193,6 @@ retry: i -= PAGE_SIZE; m = vm_page_lookup(object, OFF_TO_IDX(offset + i)); - vm_page_unwire(m, 0); vm_page_free(m); } vmem_free(vmem, addr, size); @@ -367,7 +366,6 @@ retry: i -= PAGE_SIZE; m = vm_page_lookup(object, OFF_TO_IDX(offset + i)); - vm_page_unwire(m, 0); vm_page_free(m); } VM_OBJECT_WUNLOCK(object); @@ -401,7 +399,6 @@ kmem_unback(vm_object_t object, vm_offse VM_OBJECT_WLOCK(object); for (i = 0; i < size; i += PAGE_SIZE) { m = vm_page_lookup(object, OFF_TO_IDX(offset + i)); - vm_page_unwire(m, 0); vm_page_free(m); } VM_OBJECT_WUNLOCK(object); Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_object.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/vm/vm_object.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/vm/vm_object.c Mon May 26 09:48:57 2014 (r266683) @@ -1951,7 +1951,10 @@ again: if ((options & OBJPR_NOTWIRED) != 0 && wirings != 0) goto next; pmap_remove_all(p); - /* Account for removal of wired mappings. */ + /* + * Account for removal of wired mappings. + * The object will not contain unmanaged pages. 
+ */ if (wirings != 0) { KASSERT(p->wire_count == wirings, ("inconsistent wire count %d %d %p", Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_page.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/vm/vm_page.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/vm/vm_page.c Mon May 26 09:48:57 2014 (r266683) @@ -1611,7 +1611,7 @@ vm_page_alloc(vm_object_t object, vm_pin if (vp != NULL) vdrop(vp); pagedaemon_wakeup(); - if (req & VM_ALLOC_WIRED) { + if ((req & VM_ALLOC_WIRED) != 0 && unmanaged == 0) { atomic_subtract_int(&vm_cnt.v_wire_count, 1); m->wire_count = 0; } @@ -1807,11 +1807,10 @@ retry: &deferred_vdrop_list); if (vm_paging_needed()) pagedaemon_wakeup(); - atomic_subtract_int(&vm_cnt.v_wire_count, - npages); for (m_tmp = m, m = m_ret; m < &m_ret[npages]; m++) { - m->wire_count = 0; + m->wire_count = 1; + m->oflags = VPO_UNMANAGED; if (m >= m_tmp) m->object = NULL; vm_page_free(m); @@ -2232,9 +2231,15 @@ vm_page_free_toq(vm_page_t m) vm_page_lock_assert(m, MA_OWNED); KASSERT(!pmap_page_is_mapped(m), ("vm_page_free_toq: freeing mapped page %p", m)); - } else + } else { KASSERT(m->queue == PQ_NONE, ("vm_page_free_toq: unmanaged page %p is queued", m)); + KASSERT(m->wire_count == 1, + ("vm_page_free_toq: invalid wired count %u for unmanaged page %p", + m->wire_count, m)); + m->wire_count--; + atomic_subtract_int(&vm_cnt.v_wire_count, 1); + } PCPU_INC(cnt.v_tfree); if (vm_page_sbusied(m)) @@ -2363,10 +2368,10 @@ vm_page_unwire(vm_page_t m, int activate if (m->wire_count > 0) { m->wire_count--; if (m->wire_count == 0) { + if ((m->oflags & VPO_UNAMANGED) != 0) + panic("vm_page_unwire: completely unwired an unmanaged page %p", + m); atomic_subtract_int(&vm_cnt.v_wire_count, 1); - if ((m->oflags & VPO_UNMANAGED) != 0 || - m->object == NULL) - return; if (!activate) m->flags &= ~PG_WINATCFLS; vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m); Modified: user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c Mon May 26 09:48:57 2014 (r266683) @@ -373,17 +373,18 @@ retry: */ m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags | DMAR_PGF_ZERO); - if (m == NULL) - return (NULL); /* - * Prevent potential free while pgtbl_obj is + * If a page is successfully returned, it is assumed + * that the page is properly wired already. This + * prevent potential free while pgtbl_obj is * unlocked in the recursive call to * ctx_pgtbl_map_pte(), if other thread did * pte write and clean while the lock if * dropped. */ - m->wire_count++; + if (m == NULL) + return (NULL); sfp = NULL; ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags, @@ -391,14 +392,12 @@ retry: if (ptep == NULL) { KASSERT(m->pindex != 0, ("loosing root page %p", ctx)); - m->wire_count--; dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags); return (NULL); } dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W | VM_PAGE_TO_PHYS(m)); sf_buf_page(sfp)->wire_count += 1; - m->wire_count--; dmar_unmap_pgtbl(sfp, DMAR_IS_COHERENT(ctx->dmar)); /* Only executed once. 
*/ goto retry; @@ -573,7 +572,7 @@ ctx_unmap_clear_pte(struct dmar_ctx *ctx *sf = NULL; } m->wire_count--; - if (m->wire_count != 0) + if (m->wire_count != 1) return; KASSERT(lvl != 0, ("lost reference (lvl) on root pg ctx %p base %jx lvl %d", @@ -685,8 +684,6 @@ ctx_alloc_pgtbl(struct dmar_ctx *ctx) DMAR_CTX_PGLOCK(ctx); m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO | DMAR_PGF_OBJL); - /* No implicit free of the top level page table page. */ - m->wire_count = 1; DMAR_CTX_PGUNLOCK(ctx); return (0); } @@ -716,7 +713,7 @@ ctx_free_pgtbl(struct dmar_ctx *ctx) /* Obliterate wire_counts */ VM_OBJECT_ASSERT_WLOCKED(obj); for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) - m->wire_count = 0; + m->wire_count = 1; VM_OBJECT_WUNLOCK(obj); vm_object_deallocate(obj); } Modified: user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_utils.c ============================================================================== --- user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_utils.c Mon May 26 08:52:13 2014 (r266682) +++ user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_utils.c Mon May 26 09:48:57 2014 (r266683) @@ -302,7 +302,6 @@ dmar_pgfree(vm_object_t obj, vm_pindex_t VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if (m != NULL) { - vm_page_unwire(m, 0); vm_page_free(m); atomic_subtract_int(&dmar_tbl_pagecnt, 1); }
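
For readers skimming the diff above, the caller-visible effect of the change can be summarized with a short sketch.  This is a minimal illustration only, not code from the commit: the function names free_unmanaged_page_old() and free_unmanaged_page_new() are hypothetical, and the two bodies paraphrase the pattern repeated across the pmap, UMA and balloon hunks, assuming a page previously obtained with vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED).

/*
 * Minimal sketch (not part of the commit): freeing an unmanaged,
 * wired page before and after r266683.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* Before r266683: the caller had to drop the wiring itself. */
static void
free_unmanaged_page_old(vm_page_t m)
{

	m->wire_count--;		/* or vm_page_unwire(m, 0) */
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
	vm_page_free(m);
}

/*
 * After r266683: vm_page_free_toq() accepts an unmanaged page with
 * wire_count == 1 and drops both the wiring and v_wire_count itself.
 */
static void
free_unmanaged_page_new(vm_page_t m)
{

	vm_page_free(m);
}

The KASSERT added to vm_page_free_toq() in the vm/vm_page.c hunk is what enforces the wire_count == 1 invariant that the "after" variant relies on.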