Date:      Fri, 13 Jul 2012 04:10:41 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r238414 - head/sys/amd64/amd64
Message-ID:  <201207130410.q6D4AfFa046496@svn.freebsd.org>

Author: alc
Date: Fri Jul 13 04:10:41 2012
New Revision: 238414
URL: http://svn.freebsd.org/changeset/base/238414

Log:
  Wring a few cycles out of pmap_enter().  In particular, on a user-space
  pmap, avoid walking the page table twice.
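
  For context: the old code called pmap_allocpte() unconditionally for
  user-space addresses, and pmap_allocpte() itself walks the page table
  to find or create the page table page; pmap_enter() then walked the
  table a second time via pmap_pde() to locate the PTE.  The new code
  does a single pmap_pde() walk and only falls back to _pmap_allocpte(),
  followed by a retry of the same walk, when the page table page is
  absent.  Reduced to a self-contained toy, the pattern looks as follows
  (a sketch only; the two-level "toy_pmap" and all of its names are
  invented for illustration, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define	NPDE	512	/* toy "page directory" entries */
	#define	NPTE	512	/* toy "page table" entries */

	struct toy_pmap {
		uint64_t *pm_dir[NPDE];	/* level 1: directory of leaf tables */
	};

	/*
	 * Return a pointer to the leaf entry for va, walking the two-level
	 * table exactly once in the common case.  Only when the leaf table
	 * is missing do we allocate it and retry the same walk, mirroring
	 * the retry: label and goto in the new pmap_enter().
	 */
	static uint64_t *
	toy_enter(struct toy_pmap *pm, uint64_t va)
	{
		uint64_t **pde;

	retry:
		pde = &pm->pm_dir[(va >> 21) % NPDE];	/* the one walk */
		if (*pde != NULL)
			return (&(*pde)[(va >> 12) % NPTE]);
		/* Leaf table absent: create it, then redo the walk. */
		*pde = calloc(NPTE, sizeof(uint64_t));
		if (*pde == NULL)
			abort();
		goto retry;
	}

	int
	main(void)
	{
		struct toy_pmap pm = { { NULL } };
		uint64_t *pte;

		pte = toy_enter(&pm, 0x12345678);
		*pte = 0x1000 | 1;	/* pa | "valid", cf. pa | PG_V */
		printf("pte = %#jx\n", (uintmax_t)*pte);
		return (0);
	}

  The real code additionally handles demotion of 2MB mappings via
  pmap_demote_pde_locked() and reference-counts the page table page
  (mpte->wire_count), both of which the toy omits.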

Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Fri Jul 13 01:25:11 2012	(r238413)
+++ head/sys/amd64/amd64/pmap.c	Fri Jul 13 04:10:41 2012	(r238414)
@@ -3454,8 +3454,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	    ("pmap_enter: page %p is not busy", m));
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		newpte |= PG_MANAGED;
 	if ((prot & VM_PROT_WRITE) != 0)
 		newpte |= PG_RW;
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3477,14 +3475,22 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * In the case that a page table page is not
 	 * resident, we are creating it here.
 	 */
-	if (va < VM_MAXUSER_ADDRESS)
-		mpte = pmap_allocpte(pmap, va, &lock);
-
+retry:
 	pde = pmap_pde(pmap, va);
-	if (pde != NULL && (*pde & PG_V) != 0) {
-		if ((*pde & PG_PS) != 0)
-			panic("pmap_enter: attempted pmap_enter on 2MB page");
+	if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
+	    pmap_demote_pde_locked(pmap, pde, va, &lock))) {
 		pte = pmap_pde_to_pte(pde, va);
+		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+			mpte->wire_count++;
+		}
+	} else if (va < VM_MAXUSER_ADDRESS) {
+		/*
+		 * Here if the pte page isn't mapped, or if it has been
+		 * deallocated.
+		 */
+		mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
+		goto retry;
 	} else
 		panic("pmap_enter: invalid page directory va=%#lx", va);
 
@@ -3492,59 +3498,65 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	opa = origpte & PG_FRAME;
 
 	/*
-	 * Mapping has not changed, must be protection or wiring change.
+	 * Is the specified virtual address already mapped?
 	 */
-	if (origpte && (opa == pa)) {
+	if ((origpte & PG_V) != 0) {
 		/*
 		 * Wiring change, just update stats. We don't worry about
 		 * wiring PT pages as they remain resident as long as there
 		 * are valid mappings in them. Hence, if a user page is wired,
 		 * the PT page will be also.
 		 */
-		if (wired && ((origpte & PG_W) == 0))
+		if (wired && (origpte & PG_W) == 0)
 			pmap->pm_stats.wired_count++;
 		else if (!wired && (origpte & PG_W))
 			pmap->pm_stats.wired_count--;
 
 		/*
-		 * Remove extra pte reference
+		 * Remove the extra PT page reference.
 		 */
-		if (mpte)
-			mpte->wire_count--;
-
-		if ((origpte & PG_MANAGED) != 0)
-			om = m;
-		goto validate;
-	} 
-
-	/*
-	 * Mapping has changed, invalidate old range and fall through to
-	 * handle validating new mapping.
-	 */
-	if (opa) {
-		if (origpte & PG_W)
-			pmap->pm_stats.wired_count--;
-		if ((origpte & PG_MANAGED) != 0)
-			om = PHYS_TO_VM_PAGE(opa);
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			KASSERT(mpte->wire_count > 0,
 			    ("pmap_enter: missing reference to page table page,"
 			     " va: 0x%lx", va));
 		}
-	} else
-		pmap_resident_count_inc(pmap, 1);
 
-	/*
-	 * Increment the counters.
-	 */
-	if (wired)
-		pmap->pm_stats.wired_count++;
+		/*
+		 * Has the mapping changed?
+		 */
+		if (opa == pa) {
+			/*
+			 * No, might be a protection or wiring change.
+			 */
+			if ((origpte & PG_MANAGED) != 0) {
+				newpte |= PG_MANAGED;
+				om = m;
+			}
+			if ((origpte & ~(PG_M | PG_A)) == newpte)
+				goto unchanged;
+			goto validate;
+		} else {
+			/*
+			 * Yes, fall through to validate the new mapping.
+			 */
+			if ((origpte & PG_MANAGED) != 0)
+				om = PHYS_TO_VM_PAGE(opa);
+		}
+	} else {
+		/*
+		 * Increment the counters.
+		 */
+		if (wired)
+			pmap->pm_stats.wired_count++;
+		pmap_resident_count_inc(pmap, 1);
+	}
 
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((newpte & PG_MANAGED) != 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		newpte |= PG_MANAGED;
 		pv = get_pv_entry(pmap, &lock);
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
@@ -3554,45 +3566,44 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 validate:
 
 	/*
-	 * Update the PTE only if the mapping or protection/wiring bits are
-	 * different.
+	 * Update the PTE.
 	 */
-	if ((origpte & ~(PG_M | PG_A)) != newpte) {
-		newpte |= PG_A;
-		if ((access & VM_PROT_WRITE) != 0)
-			newpte |= PG_M;
-		if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
-			vm_page_aflag_set(m, PGA_WRITEABLE);
-		if (origpte & PG_V) {
-			invlva = FALSE;
-			origpte = pte_load_store(pte, newpte);
-			if (origpte & PG_A) {
-				if (origpte & PG_MANAGED)
-					vm_page_aflag_set(om, PGA_REFERENCED);
-				if (opa != pa || ((origpte & PG_NX) == 0 &&
-				    (newpte & PG_NX) != 0))
-					invlva = TRUE;
-			}
-			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
-				if ((origpte & PG_MANAGED) != 0)
-					vm_page_dirty(om);
-				if ((newpte & PG_RW) == 0)
-					invlva = TRUE;
-			}
-			if (opa != pa && (origpte & PG_MANAGED) != 0) {
-				CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
-				pmap_pvh_free(&om->md, pmap, va);
-				if ((om->aflags & PGA_WRITEABLE) != 0 &&
-				    TAILQ_EMPTY(&om->md.pv_list) &&
-				    ((om->flags & PG_FICTITIOUS) != 0 ||
-				    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
-					vm_page_aflag_clear(om, PGA_WRITEABLE);
-			}
-			if (invlva)
-				pmap_invalidate_page(pmap, va);
-		} else
-			pte_store(pte, newpte);
-	}
+	newpte |= PG_A;
+	if ((access & VM_PROT_WRITE) != 0)
+		newpte |= PG_M;
+	if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
+		vm_page_aflag_set(m, PGA_WRITEABLE);
+	if ((origpte & PG_V) != 0) {
+		invlva = FALSE;
+		origpte = pte_load_store(pte, newpte);
+		if ((origpte & PG_A) != 0) {
+			if ((origpte & PG_MANAGED) != 0)
+				vm_page_aflag_set(om, PGA_REFERENCED);
+			if (opa != pa || ((origpte & PG_NX) == 0 &&
+			    (newpte & PG_NX) != 0))
+				invlva = TRUE;
+		}
+		if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+			if ((origpte & PG_MANAGED) != 0)
+				vm_page_dirty(om);
+			if ((newpte & PG_RW) == 0)
+				invlva = TRUE;
+		}
+		if (opa != pa && (origpte & PG_MANAGED) != 0) {
+			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+			pmap_pvh_free(&om->md, pmap, va);
+			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			    TAILQ_EMPTY(&om->md.pv_list) &&
+			    ((om->flags & PG_FICTITIOUS) != 0 ||
+			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
+		}
+		if (invlva)
+			pmap_invalidate_page(pmap, va);
+	} else
+		pte_store(pte, newpte);
+
+unchanged:
 
 	/*
 	 * If both the page table page and the reservation are fully



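  Note also how the tail of the function was restructured: the masked
  comparison that previously guarded the entire PTE-update block,

	if ((origpte & ~(PG_M | PG_A)) == newpte)
		goto unchanged;

  now exits early through the new "unchanged" label when the old and
  new entries differ only in the hardware-maintained PG_M (modified)
  and PG_A (accessed) bits, so the common update path no longer nests
  inside a conditional.  Relatedly, PG_MANAGED is no longer set
  unconditionally at the top of the function: in the unchanged-mapping
  case it is inherited from the old PTE just before the comparison,
  and otherwise it is set at the point where the page is entered on
  the PV list.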