Date:      Thu, 23 May 2013 12:24:46 +0000 (UTC)
From:      Grzegorz Bernacki <gber@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r250931 - head/sys/arm/arm
Message-ID:  <201305231224.r4NCOksf092731@svn.freebsd.org>

Author: gber
Date: Thu May 23 12:24:46 2013
New Revision: 250931
URL: http://svnweb.freebsd.org/changeset/base/250931

Log:
  Rework and organize pmap_enter_locked() function.
  
  The pmap_enter_locked() implementation was ambiguous and confusing.
  Rearrange it so that each part of the mapping creation is handled
  separately.  Avoid walking through redundant conditions.
  Extract the vector_page-specific PTE setup from the normal PTE setup.
  
  Submitted by:   Zbigniew Bodek <zbb@semihalf.com>
  Sponsored by:   The FreeBSD Foundation, Semihalf

Modified:
  head/sys/arm/arm/pmap-v6.c
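
For orientation before reading the diff: the reorganized function now takes
one of three clearly separated paths -- attribute change on the same page,
replacement of the backing page, or a brand new mapping -- and then falls
through to a single PTE-construction step.  The model below is a minimal,
compilable sketch of that control flow only; the struct, helper names and
printouts are stand-ins for the kernel's pmap machinery, not the real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vm_page { bool unmanaged; };	/* stand-in for the real vm_page */

/* Stubbed PV-list operations; the real work is elided. */
static void modify_pv(void)     { puts("update flags on existing PV entry"); }
static void remove_old_pv(void) { puts("remove the old page's PV entry"); }
static void enter_pv(void)      { puts("allocate/enter a new PV entry"); }

static void
enter_locked_model(uint32_t opte, uint32_t opa, uint32_t pa,
    struct vm_page *m)
{

	if (opte != 0) {
		if (opa == pa) {
			/* Same backing page: only attributes change. */
			if (m != NULL)
				modify_pv();
			goto validate;
		}
		/* Different backing page: drop the old PV entry first. */
		remove_old_pv();
	} else {
		/* Brand new mapping: account for it exactly once. */
		puts("l2b_occupancy++; resident_count++");
	}

	/* Managed pages get a PV entry before the PTE is written. */
	if (m != NULL && !m->unmanaged)
		enter_pv();

validate:
	puts("build npte and store it");
}

int
main(void)
{
	struct vm_page page = { .unmanaged = false };

	puts("-- new mapping:");
	enter_locked_model(0, 0, 0x1000, &page);
	puts("-- attribute change on the same page:");
	enter_locked_model(0x1001, 0x1000, 0x1000, &page);
	puts("-- replacing the backing page:");
	enter_locked_model(0x1001, 0x1000, 0x2000, &page);
	return (0);
}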

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Thu May 23 12:23:18 2013	(r250930)
+++ head/sys/arm/arm/pmap-v6.c	Thu May 23 12:24:46 2013	(r250931)
@@ -2723,38 +2723,54 @@ do_l2b_alloc:
 	is_exec = is_refd = 0;
 
 	if (opte) {
-		/*
-		 * There is already a mapping at this address.
-		 * If the physical address is different, lookup the
-		 * vm_page.
-		 */
-		if (l2pte_pa(opte) != pa)
-			om = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-		else
-			om = m;
-	} else
-		om = NULL;
-
-	if ((prot & (VM_PROT_ALL)) || !m) {
-		/*
-		 * - The access type indicates that we don't need
-		 *   to do referenced emulation.
-		 * OR
-		 * - The physical page has already been referenced
-		 *   so no need to re-do referenced emulation here.
-		 */
-		npte |= L2_S_REF;
+		if (l2pte_pa(opte) == pa) {
+			/*
+			 * We're changing the attrs of an existing mapping.
+			 */
+			if (m != NULL)
+				pmap_modify_pv(m, pmap, va,
+				    PVF_WRITE | PVF_WIRED, nflags);
+			is_exec |= PTE_BEEN_EXECD(opte);
+			is_refd |= PTE_BEEN_REFD(opte);
+			goto validate;
+		}
+		if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
+			/*
+			 * Replacing an existing mapping with a new one.
+			 * It is part of our managed memory so we
+			 * must remove it from the PV list
+			 */
+			if ((pve = pmap_remove_pv(om, pmap, va))) {
+				is_exec |= PTE_BEEN_EXECD(opte);
+				is_refd |= PTE_BEEN_REFD(opte);
+		
+				if (m && ((m->oflags & VPO_UNMANAGED)))
+					pmap_free_pv_entry(pmap, pve);
+			}
+		}
 
-		if (m != NULL &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
-		 * Need to do page referenced emulation.
+		 * Keep the stats up to date
 		 */
-		npte &= ~L2_S_REF;
+		l2b->l2b_occupancy++;
+		pmap->pm_stats.resident_count++;
 	}
 
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m && !(m->oflags & VPO_UNMANAGED))) {
+		if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
+			panic("pmap_enter: no pv entries");
+
+		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		("pmap_enter: managed mapping within the clean submap"));
+		KASSERT(pve != NULL, ("No pv"));
+		pmap_enter_pv(m, pve, pmap, va, nflags);
+	}
+
+validate:
 	/* Make the new PTE valid */
 	npte |= L2_S_PROTO;
 #ifdef SMP
@@ -2763,78 +2779,48 @@ do_l2b_alloc:
 	/* Set defaults first - kernel read access */
 	npte |= L2_APX;
 	npte |= L2_S_PROT_R;
+	/* Set "referenced" flag */
+	npte |= L2_S_REF;
 
 	/* Now tune APs as desired */
 	if (user)
 		npte |= L2_S_PROT_U;
-
-	if (prot & VM_PROT_WRITE) {
-		npte &= ~(L2_APX);
-
-		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0) {
-			vm_page_aflag_set(m, PGA_WRITEABLE);
+	/*
+	 * If this is not a vector_page
+	 * then continue setting mapping parameters
+	 */
+	if (m != NULL) {
+		if (prot & (VM_PROT_ALL)) {
+			if ((m->oflags & VPO_UNMANAGED) == 0)
+				vm_page_aflag_set(m, PGA_REFERENCED);
+		} else {
 			/*
-			 * The access type and permissions indicate 
-			 * that the page will be written as soon as returned
-			 * from fault service.
-			 * Mark it dirty from the outset.
+			 * Need to do page referenced emulation.
 			 */
-			if ((access & VM_PROT_WRITE) != 0)
-				vm_page_dirty(m);
+			npte &= ~L2_S_REF;
 		}
-	}
-
-	if (!(prot & VM_PROT_EXECUTE) && m)
-		npte |= L2_XN;
 
-	if (m && (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE))
-		npte |= pte_l2_s_cache_mode;
+		if (prot & VM_PROT_WRITE) {
+			/* Write enable */
+			npte &= ~(L2_APX);
 
-	if (m && m == om) {
-		/*
-		 * We're changing the attrs of an existing mapping.
-		 */
-		pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_WIRED, nflags);
-		is_exec |= PTE_BEEN_EXECD(opte);
-		is_refd |= PTE_BEEN_REFD(opte);
-	} else {
-		/*
-		 * New mapping, or changing the backing page
-		 * of an existing mapping.
-		 */
-		if (om) {
-			/*
-			 * Replacing an existing mapping with a new one.
-			 * It is part of our managed memory so we
-			 * must remove it from the PV list
-			 */
-			if ((pve = pmap_remove_pv(om, pmap, va))) {
-				is_exec |= PTE_BEEN_EXECD(opte);
-				is_refd |= PTE_BEEN_REFD(opte);
-
-			    if (m && ((m->oflags & VPO_UNMANAGED)))
-				pmap_free_pv_entry(pmap, pve);
+			if ((m->oflags & VPO_UNMANAGED) == 0) {
+				vm_page_aflag_set(m, PGA_WRITEABLE);
+				/*
+				 * The access type and permissions indicate 
+				 * that the page will be written as soon as
+				 * returned from fault service.
+				 * Mark it dirty from the outset.
+				 */
+				if ((access & VM_PROT_WRITE) != 0)
+					vm_page_dirty(m);
 			}
 		}
+		if (!(prot & VM_PROT_EXECUTE))
+			npte |= L2_XN;
 
-		if ((m && !(m->oflags & VPO_UNMANAGED))) {
-			if ((!pve) &&
-			    (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
-				panic("pmap_enter: no pv entries");
-
-			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-			("pmap_enter: managed mapping within the clean submap"));
-			KASSERT(pve != NULL, ("No pv"));
-			pmap_enter_pv(m, pve, pmap, va, nflags);
-		}
-	}
-
-	/*
-	 * Keep the stats up to date
-	 */
-	if (opte == 0) {
-		l2b->l2b_occupancy++;
-		pmap->pm_stats.resident_count++;
+		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			npte |= pte_l2_s_cache_mode;
 	}
 
 	CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",
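
The other half of the rework is visible in the second hunk: the
"referenced" bit now defaults to set, and the vector page (identified by
m == NULL in the kernel code) keeps the kernel-read defaults, while
ordinary pages get the referenced-emulation, write, execute and
cacheability bits applied on top.  The sketch below models that bit
assembly; the bit values and prot constants are invented for the example,
and only the ordering of the steps follows the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values; the real L2_* descriptor bits differ. */
#define L2_S_PROTO	0x001u	/* valid small-page descriptor */
#define L2_APX		0x002u	/* write-disable when set */
#define L2_S_PROT_R	0x004u	/* kernel read access */
#define L2_S_PROT_U	0x008u	/* user access */
#define L2_S_REF	0x010u	/* software "referenced" bit */
#define L2_XN		0x020u	/* execute never */
#define CACHE_MODE	0x040u	/* stand-in for pte_l2_s_cache_mode */

enum { PROT_READ = 1, PROT_WRITE = 2, PROT_EXEC = 4 };

static uint32_t
build_npte(int prot, bool user, bool vector_page, bool uncacheable)
{
	/* Defaults first: valid, kernel read-only, referenced. */
	uint32_t npte = L2_S_PROTO | L2_APX | L2_S_PROT_R | L2_S_REF;

	if (user)
		npte |= L2_S_PROT_U;
	if (!vector_page) {
		if (prot == 0)
			npte &= ~L2_S_REF;	/* referenced emulation */
		if (prot & PROT_WRITE)
			npte &= ~L2_APX;	/* write enable */
		if (!(prot & PROT_EXEC))
			npte |= L2_XN;
		if (!uncacheable)
			npte |= CACHE_MODE;
	}
	return (npte);
}

int
main(void)
{

	printf("user rw-: %#x\n",
	    build_npte(PROT_READ | PROT_WRITE, true, false, false));
	printf("vector  : %#x\n", build_npte(PROT_READ, false, true, false));
	return (0);
}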


