Date:      Mon, 16 Jun 2014 18:33:33 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r267550 - in user/attilio/rm_vmobj_cache/sys: amd64/amd64 arm/arm dev/drm2/i915 dev/drm2/ttm i386/i386 i386/xen ia64/ia64 kern mips/mips powerpc/aim powerpc/booke sparc64/sparc64 vm x86...
Message-ID:  <201406161833.s5GIXXeK054370@svn.freebsd.org>

Author: attilio
Date: Mon Jun 16 18:33:32 2014
New Revision: 267550
URL: http://svnweb.freebsd.org/changeset/base/267550

Log:
  Restore parity with head.
  The patch for unmanaged pages is preserved in the repository, and since we
  need to make progress on other items it can be re-evaluated later on.

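For reference, the head semantics restored here drop the branch's rule that
unmanaged pages are always allocated wired.  Wiring is again an explicit,
optional request, and a caller that does wire an unmanaged page must undo the
wiring itself before freeing the page.  A minimal sketch of that contract
(editorial illustration, not code from the tree):

	vm_page_t m;

	/* Wiring must be requested explicitly, even for unmanaged pages. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return;
	/* ... use the page ... */

	/* Unwire by hand; vm_page_free*() no longer fixes this up. */
	m->wire_count--;
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
	vm_page_free(m);
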
Modified:
  user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c
  user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/arm/arm/machdep.c
  user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c
  user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c
  user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c
  user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c
  user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c
  user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/kern/kern_sharedpage.c
  user/attilio/rm_vmobj_cache/sys/kern/vfs_bio.c
  user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c
  user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/powerpc/aim/slb.c
  user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c
  user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c
  user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c
  user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c
  user/attilio/rm_vmobj_cache/sys/vm/uma_core.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_fault.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_object.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_page.c
  user/attilio/rm_vmobj_cache/sys/vm/vm_page.h
  user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c

Modified: user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/amd64/amd64/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -2012,17 +2012,7 @@ pmap_free_zero_pages(struct spglist *fre
 
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-
-		/*
-		 * Preserve the page's PG_ZERO setting.
-		 * However, as the pages are unmanaged, fix-up the wired count
-		 * to perform a correct free.
-		 */
-		if (m->wire_count != 0)
-		panic("pmap_free_zero_pages: wrong wire count %u for page %p",
-			    m->wire_count, m);
-		m->wire_count = 1;
-		atomic_add_int(&vm_cnt.v_wire_count, 1);
+		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
 }
@@ -2339,6 +2329,8 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 			/* Have to allocate a new pdp, recurse */
 			if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
 			    lockp) == NULL) {
+				--m->wire_count;
+				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 				vm_page_free_zero(m);
 				return (NULL);
 			}
@@ -2370,6 +2362,8 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 			/* Have to allocate a new pd, recurse */
 			if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 			    lockp) == NULL) {
+				--m->wire_count;
+				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 				vm_page_free_zero(m);
 				return (NULL);
 			}
@@ -2382,6 +2376,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 				/* Have to allocate a new pd, recurse */
 				if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 				    lockp) == NULL) {
+					--m->wire_count;
+					atomic_subtract_int(&vm_cnt.v_wire_count,
+					    1);
 					vm_page_free_zero(m);
 					return (NULL);
 				}
@@ -2519,6 +2516,8 @@ pmap_release(pmap_t pmap)
 		pmap->pm_pml4[DMPML4I + i] = 0;
 	pmap->pm_pml4[PML4PML4I] = 0;	/* Recursive Mapping */
 
+	m->wire_count--;
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 	vm_page_free_zero(m);
 	if (pmap->pm_pcid != -1)
 		free_unr(&pcid_unr, pmap->pm_pcid);
@@ -2816,9 +2815,6 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 		m_pc = SLIST_FIRST(&free);
 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 		/* Recycle a freed page table page. */
-		KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0,
-	    ("reclaim_pv_chunk: recycled page table page %p not unmanaged",
-		    m_pc));
 		m_pc->wire_count = 1;
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}

Modified: user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/amd64/amd64/uma_machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -78,5 +78,7 @@ uma_small_free(void *mem, int size, u_in
 	pa = DMAP_TO_PHYS((vm_offset_t)mem);
 	dump_drop_page(pa);
 	m = PHYS_TO_VM_PAGE(pa);
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 }

Modified: user/attilio/rm_vmobj_cache/sys/arm/arm/machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/arm/arm/machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/arm/arm/machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -387,8 +387,7 @@ cpu_startup(void *dummy)
 	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
 	arm_lock_cache_line(ARM_TP_ADDRESS);
 #else
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
 	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
 #endif
 	*(uint32_t *)ARM_RAS_START = 0;

Modified: user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/dev/drm2/i915/i915_gem_gtt.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -208,6 +208,7 @@ i915_gem_cleanup_aliasing_ppgtt(struct d
 		if (m != NULL) {
 			vm_page_unwire(m, PQ_INACTIVE);
 			vm_page_free(m);
+		}
 	}
 	free(ppgtt->pt_pages, DRM_I915_GEM);
 	free(ppgtt, DRM_I915_GEM);

Modified: user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -492,7 +492,7 @@ static int ttm_alloc_new_pages(struct pg
 	unsigned max_cpages = min(count,
 			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
 
-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
 	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
 	    VM_ALLOC_ZERO : 0);
 	
@@ -708,7 +708,7 @@ static int ttm_get_pages(vm_page_t *page
 	unsigned count;
 	int r;
 
-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
 
 	/* No pool for cached pages */

Modified: user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/i386/i386/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1578,17 +1578,7 @@ pmap_free_zero_pages(struct spglist *fre
 
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-
-		/*
-		 * Preserve the page's PG_ZERO setting.
-		 * However, as the pages are unmanaged, fix-up the wired count
-		 * to perform a correct free.
-		 */
-		if (m->wire_count != 0)
-		panic("pmap_free_zero_pages: wrong wire count %u for page %p",
-			    m->wire_count, m);
-		m->wire_count = 1;
-		atomic_add_int(&vm_cnt.v_wire_count, 1);
+		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
 }
@@ -2059,6 +2049,8 @@ pmap_release(pmap_t pmap)
 		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 		    ("pmap_release: got wrong ptd page"));
 #endif
+		m->wire_count--;
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		vm_page_free_zero(m);
 	}
 }
@@ -2320,9 +2312,6 @@ out:
 		m_pc = SLIST_FIRST(&free);
 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 		/* Recycle a freed page table page. */
-		KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0,
-	    ("pmap_pv_reclaim: recycled page table page %p not unmanaged",
-		    m_pc));
 		m_pc->wire_count = 1;
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}

Modified: user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/i386/xen/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1336,16 +1336,6 @@ pmap_free_zero_pages(vm_page_t free)
 		m = free;
 		free = (void *)m->object;
 		m->object = NULL;
-
-		/*
-		 * As the pages are unmanaged, fix-up the wired count
-		 * to perform a correct free.
-		 */
-		if (m->wire_count != 0)
-		panic("pmap_free_zero_pages: wrong wire count %u for page %p",
-			    m->wire_count, m);
-		m->wire_count = 1;
-		atomic_add_int(&vm_cnt.v_wire_count, 1);
 		vm_page_free_zero(m);
 	}
 }
@@ -1822,6 +1812,8 @@ pmap_release(pmap_t pmap)
 			KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 			    ("pmap_release: got wrong ptd page"));
 #endif
+		m->wire_count--;
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		vm_page_free(m);
 	}
 #ifdef PAE
@@ -2096,9 +2088,6 @@ out:
 		m_pc = free;
 		free = (void *)m_pc->object;
 		/* Recycle a freed page table page. */
-		KASSERT((m_pc->oflags & VPO_UNMANAGED) != 0,
-	    ("pmap_pv_reclaim: recycled page table page %p not unmanaged",
-		    m_pc));
 		m_pc->wire_count = 1;
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}

Modified: user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/ia64/ia64/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -293,8 +293,9 @@ pmap_alloc_vhpt(void)
 	vm_size_t size;
 
 	size = 1UL << pmap_vhpt_log2size;
-	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ,
-	    atop(size), 0UL, ~0UL, size, 0UL, VM_MEMATTR_DEFAULT);
+	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_WIRED, atop(size), 0UL, ~0UL, size, 0UL,
+	    VM_MEMATTR_DEFAULT);
 	if (m != NULL) {
 		vhpt = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
 		pmap_initialize_vhpt(vhpt);

Modified: user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/ia64/ia64/uma_machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -71,5 +71,7 @@ uma_small_free(void *mem, int size, u_in
 	vm_page_t m;
 
 	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 }

Modified: user/attilio/rm_vmobj_cache/sys/kern/kern_sharedpage.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/kern/kern_sharedpage.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/kern/kern_sharedpage.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -108,8 +108,7 @@ shared_page_init(void *dummy __unused)
 	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
 	    VM_PROT_DEFAULT, 0, NULL);
 	VM_OBJECT_WLOCK(shared_page_obj);
-	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO |
-	    VM_ALLOC_WIRED);
+	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO);
 	m->valid = VM_PAGE_BITS_ALL;
 	VM_OBJECT_WUNLOCK(shared_page_obj);
 	addr = kva_alloc(PAGE_SIZE);

Modified: user/attilio/rm_vmobj_cache/sys/kern/vfs_bio.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/kern/vfs_bio.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/kern/vfs_bio.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -4295,7 +4295,9 @@ vm_hold_free_pages(struct buf *bp, int n
 		if (vm_page_sbusied(p))
 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
+		p->wire_count--;
 		vm_page_free(p);
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 	}
 	bp->b_npages = newnpages;
 }

Modified: user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/mips/mips/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -993,14 +993,9 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_
 
 	/*
 	 * If the page is finally unwired, simply free it.
-	 * Fix-up the wire_count value to make the function to perform
-	 * the free correctly.
 	 */
-	if (m->wire_count != 0)
-		panic("_pmap_unwire_ptp: invalid wire count %u for the page %p",
-		    m->wire_count, m);
-	++m->wire_count;
 	vm_page_free_zero(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 }
 
 /*
@@ -1052,7 +1047,8 @@ pmap_alloc_direct_page(unsigned int inde
 {
 	vm_page_t m;
 
-	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_ZERO);
+	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
 	if (m == NULL)
 		return (NULL);
 
@@ -1147,6 +1143,8 @@ _pmap_allocpte(pmap_t pmap, unsigned pte
 			if (_pmap_allocpte(pmap, NUPDE + segindex, 
 			    flags) == NULL) {
 				/* alloc failed, release current */
+				--m->wire_count;
+				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 				vm_page_free_zero(m);
 				return (NULL);
 			}
@@ -1228,6 +1226,8 @@ pmap_release(pmap_t pmap)
 	ptdva = (vm_offset_t)pmap->pm_segtab;
 	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
 
+	ptdpg->wire_count--;
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 	vm_page_free_zero(ptdpg);
 }
 
@@ -1581,7 +1581,8 @@ retry:
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL);
+	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
+	    VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (try) {
 			pv_entry_count--;

Modified: user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/mips/mips/uma_machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -49,7 +49,7 @@ uma_small_alloc(uma_zone_t zone, int byt
 	void *va;
 
 	*flags = UMA_SLAB_PRIV;
-	pflags = malloc2vm_flags(wait);
+	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
 	for (;;) {
 		m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
@@ -77,5 +77,7 @@ uma_small_free(void *mem, int size, u_in
 
 	pa = MIPS_DIRECT_TO_PHYS((vm_offset_t)mem);
 	m = PHYS_TO_VM_PAGE(pa);
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 }

Modified: user/attilio/rm_vmobj_cache/sys/powerpc/aim/slb.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/powerpc/aim/slb.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/powerpc/aim/slb.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -484,7 +484,7 @@ slb_uma_real_alloc(uma_zone_t zone, int 
 		realmax = platform_real_maxaddr();
 
 	*flags = UMA_SLAB_PRIV;
-	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ;
+	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 
 	for (;;) {
 		m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,

Modified: user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/powerpc/aim/uma_machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -91,6 +91,8 @@ uma_small_free(void *mem, int size, u_in
 		    (vm_offset_t)mem + PAGE_SIZE);
 
 	m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 	atomic_subtract_int(&hw_uma_mdpages, 1);
 }

Modified: user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/powerpc/booke/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -648,13 +648,8 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsign
 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
 		pa = pte_vatopa(mmu, kernel_pmap, va);
 		m = PHYS_TO_VM_PAGE(pa);
-
-		/* Fix-up the wire_count to make free perform correctly. */
-		if (m->wire_count != 0)
-			panic("ptbl_free: invalid wire count %u for page %p",
-			    m->wire_count, m);
-		++m->wire_count;
 		vm_page_free_zero(m);
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		mmu_booke_kremove(mmu, va);
 	}
 

Modified: user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/pmap.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1292,6 +1292,8 @@ pmap_release(pmap_t pm)
 	while (!TAILQ_EMPTY(&obj->memq)) {
 		m = TAILQ_FIRST(&obj->memq);
 		m->md.pmap = NULL;
+		m->wire_count--;
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		vm_page_free_zero(m);
 	}
 	VM_OBJECT_WUNLOCK(obj);

Modified: user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/sparc64/sparc64/vm_machdep.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -546,5 +546,7 @@ uma_small_free(void *mem, int size, u_in
 
 	PMAP_STATS_INC(uma_nsmall_free);
 	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 }

Modified: user/attilio/rm_vmobj_cache/sys/vm/uma_core.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/uma_core.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/uma_core.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1155,7 +1155,7 @@ noobj_alloc(uma_zone_t zone, int bytes, 
 		 */
 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
 			vm_page_unwire(p, PQ_INACTIVE);
-			vm_page_free(p);
+			vm_page_free(p); 
 		}
 		return (NULL);
 	}

Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_fault.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_fault.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_fault.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -426,8 +426,6 @@ RetryFault:;
 				if (fs.object->type != OBJT_VNODE &&
 				    fs.object->backing_object == NULL)
 					alloc_req |= VM_ALLOC_ZERO;
-				if ((fs.object->flags & OBJ_UNMANAGED) != 0)
-					alloc_req |= VM_ALLOC_WIRED;
 				fs.m = vm_page_alloc(fs.object, fs.pindex,
 				    alloc_req);
 			}
@@ -1442,7 +1440,7 @@ vm_fault_additional_pages(m, rbehind, ra
 	vm_object_t object;
 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
 	vm_page_t rtm;
-	int alloc_req, cbehind, cahead;
+	int cbehind, cahead;
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 
@@ -1471,10 +1469,6 @@ vm_fault_additional_pages(m, rbehind, ra
 		rbehind = cbehind;
 	}
 
-	alloc_req = VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED;
-	if ((object->flags & OBJ_UNMANAGED) != 0)
-		alloc_req |= VM_ALLOC_WIRED;
-
 	/*
 	 * scan backward for the read behind pages -- in memory 
 	 */
@@ -1494,7 +1488,8 @@ vm_fault_additional_pages(m, rbehind, ra
 		for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
 		    tpindex < pindex; i++, tpindex--) {
 
-			rtm = vm_page_alloc(object, tpindex, alloc_req);
+			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
+			    VM_ALLOC_IFNOTCACHED);
 			if (rtm == NULL) {
 				/*
 				 * Shift the allocated pages to the
@@ -1532,7 +1527,8 @@ vm_fault_additional_pages(m, rbehind, ra
 
 	for (; tpindex < endpindex; i++, tpindex++) {
 
-		rtm = vm_page_alloc(object, tpindex, alloc_req);
+		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
+		    VM_ALLOC_IFNOTCACHED);
 		if (rtm == NULL) {
 			break;
 		}

Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_kern.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -169,7 +169,7 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t 
 	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		tries = 0;
@@ -232,7 +232,7 @@ kmem_alloc_contig(struct vmem *vmem, vm_
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
 	VM_OBJECT_WLOCK(object);
 	tries = 0;
 retry:

Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_object.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_object.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_object.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1951,10 +1951,7 @@ again:
 			if ((options & OBJPR_NOTWIRED) != 0 && wirings != 0)
 				goto next;
 			pmap_remove_all(p);
-			/*
-			 * Account for removal of wired mappings.
-			 * The object will not contain unmanaged pages.
-			 */
+			/* Account for removal of wired mappings. */
 			if (wirings != 0) {
 				KASSERT(p->wire_count == wirings,
 				    ("inconsistent wire count %d %d %p",
@@ -2044,12 +2041,8 @@ vm_object_populate(vm_object_t object, v
 	int rv;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT((object->flags & OBJ_UNMANAGED) != 0,
-            ("vm_object_populate: object %p cannot contain unmanaged pages",
-	    object));
 	for (pindex = start; pindex < end; pindex++) {
-		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
-		    VM_ALLOC_WIRED);
+		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			ma[0] = m;
 			rv = vm_pager_get_pages(object, ma, 1, 0);

Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_page.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_page.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_page.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -1450,7 +1450,7 @@ vm_page_alloc(vm_object_t object, vm_pin
 	struct vnode *vp = NULL;
 	vm_object_t m_object;
 	vm_page_t m, mpred;
-	int flags, req_class, unmanaged;
+	int flags, req_class;
 
 	mpred = 0;	/* XXX: pacify gcc */
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
@@ -1462,10 +1462,6 @@ vm_page_alloc(vm_object_t object, vm_pin
 	if (object != NULL)
 		VM_OBJECT_ASSERT_WLOCKED(object);
 
-	unmanaged = (object == NULL || (object->flags & OBJ_UNMANAGED) != 0);
-	KASSERT(unmanaged == 0 || (req & VM_ALLOC_WIRED) != 0,
-	    ("vm_page_alloc: unmanaged but unwired request req(%x)", req));
-
 	req_class = req & VM_ALLOC_CLASS_MASK;
 
 	/*
@@ -1589,7 +1585,8 @@ vm_page_alloc(vm_object_t object, vm_pin
 		flags |= PG_NODUMP;
 	m->flags = flags;
 	m->aflags = 0;
-	m->oflags = (unmanaged != 0) ? VPO_UNMANAGED : 0;
+	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
+	    VPO_UNMANAGED : 0;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
 		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
@@ -1611,7 +1608,7 @@ vm_page_alloc(vm_object_t object, vm_pin
 			if (vp != NULL)
 				vdrop(vp);
 			pagedaemon_wakeup();
-			if ((req & VM_ALLOC_WIRED) != 0 && unmanaged == 0) {
+			if (req & VM_ALLOC_WIRED) {
 				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 				m->wire_count = 0;
 			}
@@ -1679,8 +1676,6 @@ vm_page_alloc_contig_vdrop(struct spglis
  *
  *	The caller must always specify an allocation class.
  *
- *	The returned pages will all be wired.
- *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
@@ -1691,6 +1686,7 @@ vm_page_alloc_contig_vdrop(struct spglis
  *	VM_ALLOC_NOOBJ		page is not associated with an object and
  *				should not be exclusive busy 
  *	VM_ALLOC_SBUSY		shared busy the allocated page
+ *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  *
  *	This routine may not sleep.
@@ -1712,8 +1708,6 @@ vm_page_alloc_contig(vm_object_t object,
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
 	    req));
-	KASSERT((req & VM_ALLOC_WIRED) == 0,
-	    ("vm_page_alloc_contig: VM_ALLOC_WIRED passed in req (%x)", req));
 	if (object != NULL) {
 		VM_OBJECT_ASSERT_WLOCKED(object);
 		KASSERT(object->type == OBJT_PHYS,
@@ -1781,7 +1775,8 @@ retry:
 		flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
-	atomic_add_int(&vm_cnt.v_wire_count, npages);
+	if ((req & VM_ALLOC_WIRED) != 0)
+		atomic_add_int(&vm_cnt.v_wire_count, npages);
 	if (object != NULL) {
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    memattr == VM_MEMATTR_DEFAULT)
@@ -1797,8 +1792,8 @@ retry:
 			if ((req & VM_ALLOC_SBUSY) != 0)
 				m->busy_lock = VPB_SHARERS_WORD(1);
 		}
-		m->wire_count = 1;
-
+		if ((req & VM_ALLOC_WIRED) != 0)
+			m->wire_count = 1;
 		/* Unmanaged pages don't use "act_count". */
 		m->oflags = VPO_UNMANAGED;
 		if (object != NULL) {
@@ -1807,10 +1802,13 @@ retry:
 				    &deferred_vdrop_list);
 				if (vm_paging_needed())
 					pagedaemon_wakeup();
+				if ((req & VM_ALLOC_WIRED) != 0)
+					atomic_subtract_int(&vm_cnt.v_wire_count,
+					    npages);
 				for (m_tmp = m, m = m_ret;
 				    m < &m_ret[npages]; m++) {
-					m->wire_count = 1;
-					m->oflags = VPO_UNMANAGED;
+					if ((req & VM_ALLOC_WIRED) != 0)
+						m->wire_count = 0;
 					if (m >= m_tmp)
 						m->object = NULL;
 					vm_page_free(m);
@@ -1885,8 +1883,6 @@ vm_page_alloc_init(vm_page_t m)
  *
  *	The caller must always specify an allocation class.
  *
- *	The returned page will be wired.
- *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
@@ -1895,6 +1891,7 @@ vm_page_alloc_init(vm_page_t m)
  *	optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
+ *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  *
  *	This routine may not sleep.
@@ -1907,10 +1904,6 @@ vm_page_alloc_freelist(int flind, int re
 	u_int flags;
 	int req_class;
 
-	KASSERT((req & VM_ALLOC_WIRED) == 0,
-	    ("vm_page_alloc_freelist: VM_ALLOC_WIRED passed in req (%x)",
-	    req));
-
 	req_class = req & VM_ALLOC_CLASS_MASK;
 
 	/*
@@ -1951,14 +1944,14 @@ vm_page_alloc_freelist(int flind, int re
 	if ((req & VM_ALLOC_ZERO) != 0)
 		flags = PG_ZERO;
 	m->flags &= flags;
-
-	/*
-	 * The page lock is not required for wiring a page that does
-	 * not belong to an object.
-	 */
-	atomic_add_int(&vm_cnt.v_wire_count, 1);
-	m->wire_count = 1;
-
+	if ((req & VM_ALLOC_WIRED) != 0) {
+		/*
+		 * The page lock is not required for wiring a page that does
+		 * not belong to an object.
+		 */
+		atomic_add_int(&vm_cnt.v_wire_count, 1);
+		m->wire_count = 1;
+	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
 	if (drop != NULL)
@@ -2234,15 +2227,9 @@ vm_page_free_toq(vm_page_t m)
 		vm_page_lock_assert(m, MA_OWNED);
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_toq: freeing mapped page %p", m));
-	} else {
+	} else
 		KASSERT(m->queue == PQ_NONE,
 		    ("vm_page_free_toq: unmanaged page %p is queued", m));
-		KASSERT(m->wire_count == 1,
-	    ("vm_page_free_toq: invalid wired count %u for unmanaged page %p",
-		    m->wire_count, m));
-		m->wire_count--;
-		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
-	}
 	PCPU_INC(cnt.v_tfree);
 
 	if (vm_page_sbusied(m))
@@ -2348,7 +2335,10 @@ vm_page_wire(vm_page_t m)
  * paged again.  If paging is enabled, then the value of the parameter
  * "queue" determines the queue to which the page is added.
  *
- * If a page is fictitious or managed, then its wire count must always be one.
+ * However, unless the page belongs to an object, it is not enqueued because
+ * it cannot be paged out.
+ *
+ * If a page is fictitious, then its wire count must always be one.
  *
  * A managed page must be locked.
  */
@@ -2369,6 +2359,7 @@ vm_page_unwire(vm_page_t m, uint8_t queu
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
+			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 			if ((m->oflags & VPO_UNMANAGED) != 0 ||
 			    m->object == NULL)
 				return;

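With vm_page_alloc_contig() and vm_page_alloc_freelist() no longer wiring
pages unconditionally, the MD callers patched above opt in through the flag
instead.  A sketch of the resulting idiom, following the mips
pmap_alloc_direct_page() hunk:

	vm_page_t m;

	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);
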
Modified: user/attilio/rm_vmobj_cache/sys/vm/vm_page.h
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/vm/vm_page.h	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/vm/vm_page.h	Mon Jun 16 18:33:32 2014	(r267550)
@@ -172,7 +172,6 @@ struct vm_page {
  * 	 under PV management cannot be paged out via the
  * 	 object/vm_page_t because there is no knowledge of their pte
  * 	 mappings, and such pages are also not on any PQ queue.
- *	 VPO_UNMANAGED pages are also mandatory wired.
  *
  */
 #define	VPO_UNUSED01	0x01		/* --available-- */

Modified: user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c
==============================================================================
--- user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c	Mon Jun 16 18:29:05 2014	(r267549)
+++ user/attilio/rm_vmobj_cache/sys/x86/iommu/intel_idpgtbl.c	Mon Jun 16 18:33:32 2014	(r267550)
@@ -373,18 +373,17 @@ retry:
 			 */
 			m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags |
 			    DMAR_PGF_ZERO);
+			if (m == NULL)
+				return (NULL);
 
 			/*
-			 * If a page is successfully returned, it is assumed
-			 * that the page is properly wired already.  This
-			 * prevent potential free while pgtbl_obj is
+			 * Prevent potential free while pgtbl_obj is
 			 * unlocked in the recursive call to
 			 * ctx_pgtbl_map_pte(), if other thread did
 			 * pte write and clean while the lock if
 			 * dropped.
 			 */
-			if (m == NULL)
-				return (NULL);
+			m->wire_count++;
 
 			sfp = NULL;
 			ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags,
@@ -392,12 +391,14 @@ retry:
 			if (ptep == NULL) {
 				KASSERT(m->pindex != 0,
 				    ("loosing root page %p", ctx));
+				m->wire_count--;
 				dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
 				return (NULL);
 			}
 			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
 			    VM_PAGE_TO_PHYS(m));
 			sf_buf_page(sfp)->wire_count += 1;
+			m->wire_count--;
 			dmar_unmap_pgtbl(sfp, DMAR_IS_COHERENT(ctx->dmar));
 			/* Only executed once. */
 			goto retry;
@@ -572,7 +573,7 @@ ctx_unmap_clear_pte(struct dmar_ctx *ctx
 		*sf = NULL;
 	}
 	m->wire_count--;
-	if (m->wire_count != 1)
+	if (m->wire_count != 0)
 		return;
 	KASSERT(lvl != 0,
 	    ("lost reference (lvl) on root pg ctx %p base %jx lvl %d",
@@ -684,6 +685,8 @@ ctx_alloc_pgtbl(struct dmar_ctx *ctx)
 	DMAR_CTX_PGLOCK(ctx);
 	m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK |
 	    DMAR_PGF_ZERO | DMAR_PGF_OBJL);
+	/* No implicit free of the top level page table page. */
+	m->wire_count = 1;
 	DMAR_CTX_PGUNLOCK(ctx);
 	return (0);
 }
@@ -713,7 +716,7 @@ ctx_free_pgtbl(struct dmar_ctx *ctx)
 	/* Obliterate wire_counts */
 	VM_OBJECT_ASSERT_WLOCKED(obj);
 	for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
-		m->wire_count = 1;
+		m->wire_count = 0;
 	VM_OBJECT_WUNLOCK(obj);
 	vm_object_deallocate(obj);
 }

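The intel_idpgtbl.c hunks above change the DMAR convention so that wire_count
acts as a plain reference count which may reach zero: each PTE referencing a
page-table page holds one count, the top-level page is pinned at allocation,
and a page is freed once its last reference is dropped.  In outline (an
editorial sketch of the convention, not code from the tree):

	/* Drop the reference held by the PTE that was just cleared. */
	m->wire_count--;
	if (m->wire_count != 0)
		return;			/* Page-table page still in use. */
	/* The count reached zero: the page-table page is empty; free it. */
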

