Date:      Tue, 21 May 2013 14:39:56 +0200
From:      Zbyszek Bodek <zbb@semihalf.com>
To:        freebsd-arm@FreeBSD.org
Cc:        ray@freebsd.org, Alan Cox <alc@cs.rice.edu>
Subject:   New pmap-v6.c features and improvements
Message-ID:  <519B6B1C.9060008@semihalf.com>

This is a multi-part message in MIME format.
--------------070804070705080607020805
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Content-Transfer-Encoding: 7bit

Hello Everyone,

I would like to introduce another set of patches for pmap-v6.c and
related files, which we created as part of Semihalf's work on superpages support.

The patches include some major changes like:

- Switch to the AP[2:1] access permissions model
- Move the mapping-related flags into the PTE (stop using PVF_ flags
   in pv_entry)
- Rework of the pmap_enter_locked() function
- pmap code optimizations

And some minor clean-ups:

- Get rid of the VERBOSE_INIT_ARM option
- Clean-ups, style and naming improvements to pmap

Please check out the attachment for details.

I will be happy to answer any questions and address any doubts.

Best regards
Zbyszek Bodek

--------------070804070705080607020805
Content-Type: text/x-patch;
 name="0001-arm-Switch-to-AP-2-1-access-permissions-model.-Store.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
 filename*0="0001-arm-Switch-to-AP-2-1-access-permissions-model.-Store.pa";
 filename*1="tch"

From 77f8ef738fc22b91799650e18c2dece556a5ef0b Mon Sep 17 00:00:00 2001
From: Zbigniew Bodek <zbb@semihalf.com>
Date: Thu, 25 Apr 2013 00:24:51 +0200
Subject: [PATCH 1/5] arm: Switch to AP[2:1] access permissions model. Store
 "referenced" bit in PTE.

Enable the Access Flag in the CPU control register. With AF enabled, each valid
mapping needs to have the referenced bit set in its PTE in order to be cached
in the TLB.

The AP[0] bit is used as the reference flag.
All access permissions are encoded by AP[2:1], where AP[1] is in fact
"user enable" and AP[2] (APX) is "write disable".

All mappings are always marked valid. Reference emulation is performed by
setting/clearing the reference flag in the PTE.

md.pvh_attrs is no longer necessary; however, pv_flags is still used for
now.

Marking a vm_page as "dirty" or "referenced" is performed on:
- page or flag fault servicing in pmap_fault_fixup(), based on the fault type
- vm_fault servicing in pmap_enter(), according to the desired protections
  and the faulting access type
Redundant page marking has been removed, as on ARM we know exactly when a
particular page is referenced or is going to be written.
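
As a rough illustration of the encoding described above, here is a minimal,
self-contained C sketch; the macro names and bit positions are illustrative
stand-ins for the L2_APX, L2_S_PROT_U and L2_S_REF definitions introduced in
pmap.h below, not the exact values:

#define	L2_APX_BIT	(1u << 9)	/* AP[2] (APX): write disable */
#define	L2_USER_BIT	(1u << 5)	/* AP[1]: user enable */
#define	L2_REF_BIT	(1u << 4)	/* AP[0]: reference flag */

static unsigned int
ap21_bits(int user, int writable, int referenced)
{
	unsigned int bits;

	bits = L2_APX_BIT;		/* default: kernel read-only access */
	if (user)
		bits |= L2_USER_BIT;	/* grant user read access */
	if (writable)
		bits &= ~L2_APX_BIT;	/* clear APX to allow writes */
	if (referenced)
		bits |= L2_REF_BIT;	/* only then may the TLB cache it */
	return (bits);
}

With this scheme, a PTE lacking the reference bit causes a flag fault, which
pmap_fault_fixup() resolves by setting L2_S_REF.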
---
 sys/arm/arm/locore.S     |   1 +
 sys/arm/arm/pmap-v6.c    | 225 ++++++++++++++++++++++++-----------------------
 sys/arm/arm/trap.c       |   4 +
 sys/arm/include/armreg.h |   2 +
 sys/arm/include/pmap.h   |  21 ++++-
 5 files changed, 142 insertions(+), 111 deletions(-)

diff --git a/sys/arm/arm/locore.S b/sys/arm/arm/locore.S
index 4f06afb..f17ab8e 100644
--- a/sys/arm/arm/locore.S
+++ b/sys/arm/arm/locore.S
@@ -188,6 +188,7 @@ Lunmapped:
 #ifdef _ARM_ARCH_6
 	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
 	orr	r2, r2, #(CPU_CONTROL_AFLT_ENABLE)
+	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
 #endif
 	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
 	mcr	p15, 0, r0, c1, c0, 0
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 1174660..65abf18 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -220,8 +220,8 @@ static void		pmap_free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t 	pmap_get_pv_entry(pmap_t pmap, boolean_t try);
 static vm_page_t 	pmap_pv_reclaim(pmap_t locked_pmap);
 
-static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t, int);
+static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
+    vm_page_t, vm_prot_t, boolean_t, int);
 static vm_paddr_t	pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void		pmap_alloc_l1(pmap_t);
 static void		pmap_free_l1(pmap_t);
@@ -902,10 +902,6 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 
 	if (maskbits & PVF_WRITE)
 		maskbits |= PVF_MOD;
-	/*
-	 * Clear saved attributes (modify, reference)
-	 */
-	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
 
 	if (TAILQ_EMPTY(&pg->md.pv_list)) {
 		rw_wunlock(&pvh_global_lock);
@@ -935,14 +931,13 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 			npte |= L2_APX;
 		}
 
-		if (maskbits & PVF_REF) {
+		if ((maskbits & PVF_REF) && L2_S_REFERENCED(opte)) {
 			/*
-			 * Make the PTE invalid so that we will take a
-			 * page fault the next time the mapping is
-			 * referenced.
+			 * Clear referenced flag in PTE so that we
+			 * will take a flag fault the next time the mapping
+			 * is referenced.
 			 */
-			npte &= ~L2_TYPE_MASK;
-			npte |= L2_TYPE_INV;
+			npte &= ~L2_S_REF;
 		}
 
 		CTR4(KTR_PMAP,"clearbit: pmap:%p bits:%x pte:%x->%x",
@@ -998,7 +993,6 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
 	pve->pv_flags = flags;
 
 	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
-	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 	if (pve->pv_flags & PVF_WIRED)
 		++pm->pm_stats.wired_count;
 	vm_page_aflag_set(pg, PGA_REFERENCED);
@@ -1036,6 +1030,12 @@ vector_page_setprot(int prot)
 	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
 
 	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
+	/*
+	 * Set referenced flag.
+	 * Vectors' page is always desired
+	 * to be allowed to reside in TLB. 
+	 */
+	*ptep |= L2_S_REF;
 
 	pmap_set_prot(ptep, prot|VM_PROT_EXECUTE, 0);
 
@@ -1052,16 +1052,15 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
 	if (!(prot & VM_PROT_EXECUTE))
 		*ptep |= L2_XN;
 
+	/* Set defaults first - kernel read access */
 	*ptep |= L2_APX;
 	*ptep |= L2_S_PROT_R;
-
+	/* Now tune APs as desired */
 	if (user)
 		*ptep |= L2_S_PROT_U;
 
 	if (prot & VM_PROT_WRITE)
 		*ptep &= ~(L2_APX);
-	else if (user)
-		*ptep &= ~(L2_S_PROT_R);
 }
 
 /*
@@ -1087,20 +1086,11 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
 	if (pve->pv_flags & PVF_WIRED)
 		--pm->pm_stats.wired_count;
 
-	if (pg->md.pvh_attrs & PVF_MOD)
-		vm_page_dirty(pg);
-
-	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
-		pg->md.pvh_attrs &= ~PVF_REF;
-	else
-		vm_page_aflag_set(pg, PGA_REFERENCED);
-
 	if (pve->pv_flags & PVF_WRITE) {
 		TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
 		    if (pve->pv_flags & PVF_WRITE)
 			    break;
 		if (!pve) {
-			pg->md.pvh_attrs &= ~PVF_MOD;
 			vm_page_aflag_clear(pg, PGA_WRITEABLE);
 		}
 	}
@@ -1150,10 +1140,6 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
 	/*
 	 * There is at least one VA mapping this page.
 	 */
-
-	if (clr_mask & (PVF_REF | PVF_MOD))
-		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
-
 	oflags = npv->pv_flags;
 	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
 
@@ -1168,10 +1154,8 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
 			if (npv->pv_flags & PVF_WRITE)
 				break;
 		}
-		if (!npv) {
-			pg->md.pvh_attrs &= ~PVF_MOD;
+		if (!npv)
 			vm_page_aflag_clear(pg, PGA_WRITEABLE);
-		}
 	}
 
 	return (oflags);
@@ -1350,7 +1334,8 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 	pa = l2pte_pa(pte);
 	CTR5(KTR_PMAP, "pmap_fault_fix: pmap:%p va:%x pte:0x%x ftype:%x user:%x",
 	    pm, va, pte, ftype, user);
-	if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte))) {
+	if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte)) &&
+	    L2_S_REFERENCED(pte)) {
 		/*
 		 * This looks like a good candidate for "page modified"
 		 * emulation...
@@ -1379,17 +1364,16 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 		if ((pv->pv_flags & PVF_WRITE) == 0) {
 			goto out;
 		}
-		pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
+
 		vm_page_dirty(pg);
 		pv->pv_flags |= PVF_REF | PVF_MOD;
 
 		/* Re-enable write permissions for the page */
-		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
 		pmap_set_prot(ptep, VM_PROT_WRITE, *ptep & L2_S_PROT_U);
 		CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", pte);
 		PTE_SYNC(ptep);
 		rv = 1;
-	} else if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
+	} else if (!L2_S_REFERENCED(pte)) {
 		/*
 		 * This looks like a good candidate for "page referenced"
 		 * emulation.
@@ -1401,16 +1385,15 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
 			goto out;
 		/* Get the current flags for this page. */
-
 		pv = pmap_find_pv(pg, pm, va);
 		if (pv == NULL)
 			goto out;
 
-		pg->md.pvh_attrs |= PVF_REF;
+		vm_page_aflag_set(pg, PGA_REFERENCED);
 		pv->pv_flags |= PVF_REF;
 
-
-		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
+		/* Mark the page "referenced" */
+		*ptep = pte | L2_S_REF;
 		PTE_SYNC(ptep);
 		rv = 1;
 	}
@@ -1901,7 +1884,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
 	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
-	*ptep = L2_S_PROTO | pa | cache_mode;
+	*ptep = L2_S_PROTO | pa | cache_mode | L2_S_REF;
 	pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE, 0);
 	PTE_SYNC(ptep);
 
@@ -2208,11 +2191,11 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 	}
 
 	if (flags & KENTER_CACHE) {
-		*pte = L2_S_PROTO | pa | pte_l2_s_cache_mode;
+		*pte = L2_S_PROTO | pa | pte_l2_s_cache_mode | L2_S_REF;
 		pmap_set_prot(pte, VM_PROT_READ | VM_PROT_WRITE,
 		    flags & KENTER_USER);
 	} else {
-		*pte = L2_S_PROTO | pa;
+		*pte = L2_S_PROTO | pa | L2_S_REF;
 		pmap_set_prot(pte, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
 		    0);
 	}
@@ -2476,8 +2459,6 @@ pmap_remove_all(vm_page_t m)
 		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 		KASSERT(l2b != NULL, ("No l2 bucket"));
 		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-		if (L2_S_WRITABLE(*ptep))
-			vm_page_dirty(m);
 		*ptep = 0;
 		if (pmap_is_current(pmap))
 			PTE_SYNC(ptep);
@@ -2488,7 +2469,6 @@ pmap_remove_all(vm_page_t m)
 		pmap_free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	m->md.pvh_attrs &= ~(PVF_MOD | PVF_REF);
 
 	if (flush) {
 		if (PV_BEEN_EXECD(flags))
@@ -2620,8 +2600,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 
 				f = pmap_modify_pv(pg, pm, sva,
 				    PVF_WRITE, 0);
-				if (f & PVF_WRITE)
-					vm_page_dirty(pg);
 
 				if (flush >= 0) {
 					flush++;
@@ -2673,7 +2651,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
+	pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 }
@@ -2682,8 +2660,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
  *	The pvh global and pmap locks must be held.
  */
 static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired, int flags)
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired, int flags)
 {
 	struct l2_bucket *l2b = NULL;
 	struct vm_page *opg;
@@ -2763,8 +2741,7 @@ do_l2b_alloc:
 	} else
 		opg = NULL;
 
-	if ((prot & (VM_PROT_ALL)) ||
-	    (!m || m->md.pvh_attrs & PVF_REF)) {
+	if ((prot & (VM_PROT_ALL)) || !m) {
 		/*
 		 * - The access type indicates that we don't need
 		 *   to do referenced emulation.
@@ -2772,48 +2749,47 @@ do_l2b_alloc:
 		 * - The physical page has already been referenced
 		 *   so no need to re-do referenced emulation here.
 		 */
-		npte |= L2_S_PROTO;
-#ifdef SMP
-		npte |= L2_SHARED;
-#endif
-
+		npte |= L2_S_REF;
 		nflags |= PVF_REF;
 
-		if (m && ((prot & VM_PROT_WRITE) != 0 ||
-		    (m->md.pvh_attrs & PVF_MOD))) {
-			/*
-			 * This is a writable mapping, and the
-			 * page's mod state indicates it has
-			 * already been modified. Make it
-			 * writable from the outset.
-			 */
-			nflags |= PVF_MOD;
-			if (!(m->md.pvh_attrs & PVF_MOD))
-				vm_page_dirty(m);
-		}
-		if (m && opte)
+		if (m != NULL &&
+		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
 		 * Need to do page referenced emulation.
 		 */
-		npte &= ~L2_TYPE_MASK;
-		npte |= L2_TYPE_INV;
+		npte &= ~L2_S_REF;
 	}
 
+	/* Make the new PTE valid */
+	npte |= L2_S_PROTO;
+#ifdef SMP
+	npte |= L2_SHARED;
+#endif
+	/* Set defaults first - kernel read access */
 	npte |= L2_APX;
 	npte |= L2_S_PROT_R;
+
+	/* Now tune APs as desired */
 	if (user)
 		npte |= L2_S_PROT_U;
 
 	if (prot & VM_PROT_WRITE) {
 		npte &= ~(L2_APX);
 
-		if (m != NULL &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
+		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0) {
 			vm_page_aflag_set(m, PGA_WRITEABLE);
-	} else if (user)
-		npte &= ~(L2_S_PROT_R);
+			/*
+			 * The access type and permissions indicate 
+			 * that the page will be written as soon as returned
+			 * from fault service.
+			 * Mark it dirty from the outset.
+			 */
+			if ((access & VM_PROT_WRITE) != 0)
+				vm_page_dirty(m);
+		}
+	}
 
 	if (!(prot & VM_PROT_EXECUTE) && m)
 		npte |= L2_XN;
@@ -2930,14 +2906,16 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 {
 	vm_page_t m;
 	vm_pindex_t diff, psize;
+	vm_prot_t access;
 
 	psize = atop(end - start);
 	m = m_start;
+	access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
+		pmap_enter_locked(pmap, start + ptoa(diff), access, m, prot,
+		    FALSE, M_NOWAIT);
 		m = TAILQ_NEXT(m, listq);
 	}
 	PMAP_UNLOCK(pmap);
@@ -2956,11 +2934,12 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+	vm_prot_t access;
 
+	access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    FALSE, M_NOWAIT);
+	pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 }
@@ -3565,7 +3544,7 @@ pmap_zero_page_gen(vm_page_t pg, int off, int size)
 	 * Note the temporary zero-page mapping must be a non-cached page in
 	 * order to work without corruption when write-allocate is enabled.
 	 */
-	*cdst_pte = L2_S_PROTO | phys | pte_l2_s_cache_mode;
+	*cdst_pte = L2_S_PROTO | phys | pte_l2_s_cache_mode | L2_S_REF;
 	pmap_set_prot(cdst_pte, VM_PROT_WRITE, 0);
 	PTE_SYNC(cdst_pte);
 	cpu_tlb_flushD_SE(cdstp);
@@ -3657,11 +3636,11 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
 	 * bits set to 0x0 makes page not accessible. csrc_pte is mapped
 	 * read/write until proper mapping defines are created for ARMv6.
 	 */
-	*csrc_pte = L2_S_PROTO | src | pte_l2_s_cache_mode;
+	*csrc_pte = L2_S_PROTO | src | pte_l2_s_cache_mode | L2_S_REF;
 	pmap_set_prot(csrc_pte, VM_PROT_READ, 0);
 	PTE_SYNC(csrc_pte);
 
-	*cdst_pte = L2_S_PROTO | dst | pte_l2_s_cache_mode;
+	*cdst_pte = L2_S_PROTO | dst | pte_l2_s_cache_mode | L2_S_REF;
 	pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
 	PTE_SYNC(cdst_pte);
 
@@ -3703,11 +3682,11 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		*csrc_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
-		    pte_l2_s_cache_mode;
+		    pte_l2_s_cache_mode | L2_S_REF;
 		pmap_set_prot(csrc_pte, VM_PROT_READ, 0);
 		PTE_SYNC(csrc_pte);
 		*cdst_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
-		    pte_l2_s_cache_mode;
+		    pte_l2_s_cache_mode | L2_S_REF;
 		pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
 		PTE_SYNC(cdst_pte);
 		cpu_tlb_flushD_SE(csrcp);
@@ -3798,10 +3777,28 @@ pmap_page_wired_mappings(vm_page_t m)
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
+	struct l2_bucket *l2b;
+	pv_entry_t pv;
+	pt_entry_t *pte;
+	pmap_t pmap;
+	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	return ((m->md.pvh_attrs & PVF_REF) != 0);
+	rv = FALSE;
+	rw_wlock(&pvh_global_lock);
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
+		pte = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		rv = L2_S_REFERENCED(*pte);
+		PMAP_UNLOCK(pmap);
+		if (rv)
+			break;
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (rv);
 }
 
 /*
@@ -3822,13 +3819,37 @@ pmap_ts_referenced(vm_page_t m)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
+	struct l2_bucket *l2b;
+	pv_entry_t pv;
+	pt_entry_t *pte;
+	pmap_t pmap;
+	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
-	if (m->md.pvh_attrs & PVF_MOD)
-		return (TRUE);
-
-	return(FALSE);
+	rv = FALSE;
+	/*
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
+	 * is clear, no PTEs can have PG_M set.
+	 */
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->aflags & PGA_WRITEABLE) == 0)
+		return (rv);
+	rw_wlock(&pvh_global_lock);
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
+		pte = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		rv = (L2_S_WRITABLE(*pte));
+		PMAP_UNLOCK(pmap);
+		if (rv)
+			break;
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (rv);
 }
 
 
@@ -3852,8 +3873,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-
-	if (m->md.pvh_attrs & PVF_MOD)
+	if (pmap_is_modified(m))
 		pmap_clearbit(m, PVF_MOD);
 }
 
@@ -3869,7 +3889,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	if (m->md.pvh_attrs & PVF_REF)
+	if (pmap_is_referenced(m))
 		pmap_clearbit(m, PVF_REF);
 }
 
@@ -3930,18 +3950,7 @@ retry:
 	if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0)
 		managed = TRUE;
 	if (managed) {
-		/*
-		 * The ARM pmap tries to maintain a per-mapping
-		 * reference bit.  The trouble is that it's kept in
-		 * the PV entry, not the PTE, so it's costly to access
-		 * here.  You would need to acquire the pvh global
-		 * lock, call pmap_find_pv(), and introduce a custom
-		 * version of vm_page_pa_tryrelock() that releases and
-		 * reacquires the pvh global lock.  In the end, I
-		 * doubt it's worthwhile.  This may falsely report
-		 * the given address as referenced.
-		 */
-		if ((m->md.pvh_attrs & PVF_REF) != 0)
+		if (L2_S_REFERENCED(pte))
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 	}
 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
@@ -4072,7 +4081,7 @@ pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
 	if (pte == NULL)
 		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
 
-	pte[l2pte_index(va)] = L2_S_PROTO | pa | fl;
+	pte[l2pte_index(va)] = L2_S_PROTO | pa | fl | L2_S_REF;
 	pmap_set_prot(&pte[l2pte_index(va)], prot, 0);
 	PTE_SYNC(&pte[l2pte_index(va)]);
 }
@@ -4159,7 +4168,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 #ifdef VERBOSE_INIT_ARM
 		printf("P");
 #endif
-		pte[l2pte_index(va)] = L2_S_PROTO | pa | f2s;
+		pte[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
 		pmap_set_prot(&pte[l2pte_index(va)], prot, 0);
 		PTE_SYNC(&pte[l2pte_index(va)]);
 		va += PAGE_SIZE;
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index 1b22c83..a9dffa2 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -160,7 +160,11 @@ static const struct data_abort data_aborts[] = {
 	{dab_align,	"Alignment Fault 3"},
 	{dab_buserr,	"External Linefetch Abort (S)"},
 	{NULL,		"Translation Fault (S)"},
+#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
+	{NULL,		"Translation Flag Fault"},
+#else
 	{dab_buserr,	"External Linefetch Abort (P)"},
+#endif
 	{NULL,		"Translation Fault (P)"},
 	{dab_buserr,	"External Non-Linefetch Abort (S)"},
 	{NULL,		"Domain Fault (S)"},
diff --git a/sys/arm/include/armreg.h b/sys/arm/include/armreg.h
index 238dfe2..1f54f91 100644
--- a/sys/arm/include/armreg.h
+++ b/sys/arm/include/armreg.h
@@ -290,6 +290,7 @@
 #define CPU_CONTROL_UNAL_ENABLE 0x00400000 /* U: unaligned data access */
 #define CPU_CONTROL_V6_EXTPAGE	0x00800000 /* XP: ARMv6 extended page tables */
 #define CPU_CONTROL_L2_ENABLE	0x04000000 /* L2 Cache enabled */
+#define CPU_CONTROL_AF_ENABLE	0x20000000 /* Access Flag enable */
 
 #define CPU_CONTROL_IDC_ENABLE	CPU_CONTROL_DC_ENABLE
 
@@ -395,6 +396,7 @@
 #define FAULT_ALIGN_0   0x01 /* Alignment */
 #define FAULT_ALIGN_1   0x03 /* Alignment */
 #define FAULT_TRANS_S   0x05 /* Translation -- Section */
+#define FAULT_TRANS_F   0x06 /* Translation -- Flag */
 #define FAULT_TRANS_P   0x07 /* Translation -- Page */
 #define FAULT_DOMAIN_S  0x09 /* Domain -- Section */
 #define FAULT_DOMAIN_P  0x0b /* Domain -- Page */
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index 3a4726f..e91fce7 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -373,12 +373,27 @@ extern int pmap_needs_pte_sync;
 #define	L2_S_PROTO		L2_S_PROTO_xscale
 
 #elif (ARM_MMU_V6 + ARM_MMU_V7) != 0
-
-#define	L2_S_PROT_U		(L2_AP0(2))		/* user access */
-#define	L2_S_PROT_R		(L2_AP0(1))		/* read access */
+/*
+ * AP[2:1] access permissions model:
+ *
+ * AP[2](APX)	- Write Disable
+ * AP[1]	- User Enable
+ * AP[0]	- Reference Flag
+ *
+ * AP[2]     AP[1]     Kernel     User
+ *  0          0        R/W        N
+ *  0          1        R/W       R/W
+ *  1          0         R         N
+ *  1          1         R         R
+ *
+ */
+#define	L2_S_PROT_R		(0)		/* kernel read */
+#define	L2_S_PROT_U		(L2_AP0(2))	/* user read */
+#define L2_S_REF		(L2_AP0(1))	/* reference flag */
 
 #define	L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R)
 #define	L2_S_WRITABLE(pte)	(!(pte & L2_APX))
+#define	L2_S_REFERENCED(pte)	(!!(pte & L2_S_REF))
 
 #ifndef SMP
 #define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
-- 
1.8.2


--------------070804070705080607020805
Content-Type: text/x-patch;
 name="0002-Improve-optimize-and-clean-up-ARMv6-v7-memory-manage.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
 filename*0="0002-Improve-optimize-and-clean-up-ARMv6-v7-memory-manage.pa";
 filename*1="tch"

From ada124b38c0879ca00b057948fb7487493a99cc0 Mon Sep 17 00:00:00 2001
From: Zbigniew Bodek <zbb@semihalf.com>
Date: Tue, 16 Apr 2013 17:17:23 +0200
Subject: [PATCH 2/5] Improve, optimize and clean-up ARMv6/v7 memory management
 related code.

Use pmap_find_pv() where needed instead of duplicating its code throughout
pmap-v6.c.

Avoid a possible NULL pointer dereference in pmap_enter_locked().
When trying to get m->md.pv_memattr, make sure that m != NULL;
in particular, m is NULL for the vector_page mapping.

Do not set the PGA_REFERENCED flag in pmap_enter_pv().
On ARM, any new page reference results either in entering a new mapping by
calling pmap_enter(), etc., or in fixing up the existing mapping in
pmap_fault_fixup().
Therefore we set the PGA_REFERENCED flag in the cases mentioned above, and
setting it again in pmap_enter_pv() is just a waste of cycles.

Delete the unused pm_pdir pointer from the pmap structure.

Rearrange brackets in the fault cause detection in trap.c.
Place the brackets correctly so that the flow of the conditions can be
seen at a glance.

Unify naming in pmap-v6.c and improve style.
Use naming that is common for the whole pmap and compatible with other pmaps,
and improve style where possible:
pm   -> pmap
pg   -> m
opg  -> om
*pt  -> *ptep
*pte -> *ptep
*pde -> *pdep
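
As a rough, self-contained sketch of the lookup that pmap_find_pv()
centralizes (simplified stand-in types for illustration, not the real
kernel structures):

#include <sys/queue.h>

typedef void *pmap_t;			/* stand-in for the real pmap_t */
typedef unsigned long vm_offset_t;

struct pv_entry {
	pmap_t			pv_pmap;	/* stand-in for PV_PMAP(pv) */
	vm_offset_t		pv_va;
	TAILQ_ENTRY(pv_entry)	pv_list;
};

struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;
};

/*
 * Walk the page's pv list and return the entry matching the given pmap
 * and virtual address, or NULL if there is none - the pattern that used
 * to be open-coded in several places.
 */
static struct pv_entry *
find_pv(struct md_page *md, pmap_t pmap, vm_offset_t va)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, &md->pv_list, pv_list)
		if (pv->pv_pmap == pmap && pv->pv_va == va)
			break;
	return (pv);
}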
---
 sys/arm/arm/pmap-v6.c  | 416 ++++++++++++++++++++++++-------------------------
 sys/arm/arm/pmap.c     |   2 -
 sys/arm/arm/trap.c     |  19 ++-
 sys/arm/include/pmap.h |   1 -
 4 files changed, 216 insertions(+), 222 deletions(-)

diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 65abf18..723c76d 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -563,7 +563,7 @@ pmap_pte_init_mmu_v6(void)
  * This is called at pmap creation time.
  */
 static void
-pmap_alloc_l1(pmap_t pm)
+pmap_alloc_l1(pmap_t pmap)
 {
 	struct l1_ttable *l1;
 	u_int8_t domain;
@@ -594,8 +594,8 @@ pmap_alloc_l1(pmap_t pm)
 	/*
 	 * Fix up the relevant bits in the pmap structure
 	 */
-	pm->pm_l1 = l1;
-	pm->pm_domain = domain + 1;
+	pmap->pm_l1 = l1;
+	pmap->pm_domain = domain + 1;
 }
 
 /*
@@ -603,9 +603,9 @@ pmap_alloc_l1(pmap_t pm)
  * This is called at pmap destruction time.
  */
 static void
-pmap_free_l1(pmap_t pm)
+pmap_free_l1(pmap_t pmap)
 {
-	struct l1_ttable *l1 = pm->pm_l1;
+	struct l1_ttable *l1 = pmap->pm_l1;
 
 	mtx_lock(&l1_lru_lock);
 
@@ -618,8 +618,8 @@ pmap_free_l1(pmap_t pm)
 	/*
 	 * Free up the domain number which was allocated to the pmap
 	 */
-	l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
-	l1->l1_domain_first = pm->pm_domain - 1;
+	l1->l1_domain_free[pmap->pm_domain - 1] = l1->l1_domain_first;
+	l1->l1_domain_first = pmap->pm_domain - 1;
 	l1->l1_domain_use_count--;
 
 	/*
@@ -641,7 +641,7 @@ pmap_free_l1(pmap_t pm)
  * and VA, or NULL if no L2 bucket exists for the address.
  */
 static PMAP_INLINE struct l2_bucket *
-pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_get_l2_bucket(pmap_t pmap, vm_offset_t va)
 {
 	struct l2_dtable *l2;
 	struct l2_bucket *l2b;
@@ -649,7 +649,7 @@ pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
 
 	l1idx = L1_IDX(va);
 
-	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
+	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL ||
 	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
 		return (NULL);
 
@@ -669,7 +669,7 @@ pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
  * the bucket/page in the meantime.
  */
 static struct l2_bucket *
-pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_alloc_l2_bucket(pmap_t pmap, vm_offset_t va)
 {
 	struct l2_dtable *l2;
 	struct l2_bucket *l2b;
@@ -677,36 +677,36 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
 
 	l1idx = L1_IDX(va);
 
-	PMAP_ASSERT_LOCKED(pm);
+	PMAP_ASSERT_LOCKED(pmap);
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
-	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 		/*
 		 * No mapping at this address, as there is
 		 * no entry in the L1 table.
 		 * Need to allocate a new l2_dtable.
 		 */
-		PMAP_UNLOCK(pm);
+		PMAP_UNLOCK(pmap);
 		rw_wunlock(&pvh_global_lock);
 		if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
 			rw_wlock(&pvh_global_lock);
-			PMAP_LOCK(pm);
+			PMAP_LOCK(pmap);
 			return (NULL);
 		}
 		rw_wlock(&pvh_global_lock);
-		PMAP_LOCK(pm);
-		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
+		PMAP_LOCK(pmap);
+		if (pmap->pm_l2[L2_IDX(l1idx)] != NULL) {
 			/*
 			 * Someone already allocated the l2_dtable while
 			 * we were doing the same.
 			 */
 			uma_zfree(l2table_zone, l2);
-			l2 = pm->pm_l2[L2_IDX(l1idx)];
+			l2 = pmap->pm_l2[L2_IDX(l1idx)];
 		} else {
 			bzero(l2, sizeof(*l2));
 			/*
 			 * Link it into the parent pmap
 			 */
-			pm->pm_l2[L2_IDX(l1idx)] = l2;
+			pmap->pm_l2[L2_IDX(l1idx)] = l2;
 		}
 	}
 
@@ -722,11 +722,11 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
 		 * No L2 page table has been allocated. Chances are, this
 		 * is because we just allocated the l2_dtable, above.
 		 */
-		PMAP_UNLOCK(pm);
+		PMAP_UNLOCK(pmap);
 		rw_wunlock(&pvh_global_lock);
 		ptep = uma_zalloc(l2zone, M_NOWAIT);
 		rw_wlock(&pvh_global_lock);
-		PMAP_LOCK(pm);
+		PMAP_LOCK(pmap);
 		if (l2b->l2b_kva != 0) {
 			/* We lost the race. */
 			uma_zfree(l2zone, ptep);
@@ -740,7 +740,7 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
 			 * if we allocated a new one above.
 			 */
 			if (l2->l2_occupancy == 0) {
-				pm->pm_l2[L2_IDX(l1idx)] = NULL;
+				pmap->pm_l2[L2_IDX(l1idx)] = NULL;
 				uma_zfree(l2table_zone, l2);
 			}
 			return (NULL);
@@ -769,7 +769,7 @@ pmap_free_l2_ptp(pt_entry_t *l2)
  * for the kernel pmap).
  */
 static void
-pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
+pmap_free_l2_bucket(pmap_t pmap, struct l2_bucket *l2b, u_int count)
 {
 	struct l2_dtable *l2;
 	pd_entry_t *pl1pd, l1pd;
@@ -797,7 +797,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
 	 * to a performance win over time as we don't need to continually
 	 * alloc/free.
 	 */
-	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
+	if (l2b->l2b_occupancy > 0 || pmap == pmap_kernel())
 		return;
 
 	/*
@@ -809,14 +809,14 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
 	ptep = l2b->l2b_kva;
 	l2b->l2b_kva = NULL;
 
-	pl1pd = &pm->pm_l1->l1_kva[l1idx];
+	pl1pd = &pmap->pm_l1->l1_kva[l1idx];
 
 	/*
 	 * If the L1 slot matches the pmap's domain
 	 * number, then invalidate it.
 	 */
 	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
-	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
+	if (l1pd == (L1_C_DOM(pmap->pm_domain) | L1_TYPE_C)) {
 		*pl1pd = 0;
 		PTE_SYNC(pl1pd);
 	}
@@ -829,7 +829,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
 	/*
 	 * Update the reference count in the associated l2_dtable
 	 */
-	l2 = pm->pm_l2[L2_IDX(l1idx)];
+	l2 = pmap->pm_l2[L2_IDX(l1idx)];
 	if (--l2->l2_occupancy > 0)
 		return;
 
@@ -838,7 +838,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
 	 * slots managed by this l2_dtable. Go ahead and NULL-out
 	 * the pointer in the parent pmap and free the l2_dtable.
 	 */
-	pm->pm_l2[L2_IDX(l1idx)] = NULL;
+	pmap->pm_l2[L2_IDX(l1idx)] = NULL;
 	uma_zfree(l2table_zone, l2);
 }
 
@@ -888,12 +888,12 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
  * constants and the latter would require an extra inversion at run-time.
  */
 static int
-pmap_clearbit(struct vm_page *pg, u_int maskbits)
+pmap_clearbit(struct vm_page *m, u_int maskbits)
 {
 	struct l2_bucket *l2b;
 	struct pv_entry *pv;
 	pt_entry_t *ptep, npte, opte;
-	pmap_t pm;
+	pmap_t pmap;
 	vm_offset_t va;
 	u_int oflags;
 	int count = 0;
@@ -903,7 +903,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 	if (maskbits & PVF_WRITE)
 		maskbits |= PVF_MOD;
 
-	if (TAILQ_EMPTY(&pg->md.pv_list)) {
+	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		rw_wunlock(&pvh_global_lock);
 		return (0);
 	}
@@ -911,21 +911,21 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 	/*
 	 * Loop over all current mappings setting/clearing as appropos
 	 */
-	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		va = pv->pv_va;
-		pm = PV_PMAP(pv);
+		pmap = PV_PMAP(pv);
 		oflags = pv->pv_flags;
 		pv->pv_flags &= ~maskbits;
 
-		PMAP_LOCK(pm);
+		PMAP_LOCK(pmap);
 
-		l2b = pmap_get_l2_bucket(pm, va);
+		l2b = pmap_get_l2_bucket(pmap, va);
 
 		ptep = &l2b->l2b_kva[l2pte_index(va)];
 		npte = opte = *ptep;
 
 		if ((maskbits & (PVF_WRITE|PVF_MOD)) && L2_S_WRITABLE(opte)) {
-			vm_page_dirty(pg);
+			vm_page_dirty(m);
 
 			/* make the pte read only */
 			npte |= L2_APX;
@@ -941,7 +941,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 		}
 
 		CTR4(KTR_PMAP,"clearbit: pmap:%p bits:%x pte:%x->%x",
-		    pm, maskbits, opte, npte);
+		    pmap, maskbits, opte, npte);
 		if (npte != opte) {
 			count++;
 			*ptep = npte;
@@ -953,12 +953,12 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 				cpu_tlb_flushD_SE(pv->pv_va);
 		}
 
-		PMAP_UNLOCK(pm);
+		PMAP_UNLOCK(pmap);
 
 	}
 
 	if (maskbits & PVF_WRITE)
-		vm_page_aflag_clear(pg, PGA_WRITEABLE);
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
@@ -982,20 +982,19 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
  * => caller should not adjust pmap's wire_count
  */
 static void
-pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
+pmap_enter_pv(struct vm_page *m, struct pv_entry *pve, pmap_t pmap,
     vm_offset_t va, u_int flags)
 {
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
-	PMAP_ASSERT_LOCKED(pm);
+	PMAP_ASSERT_LOCKED(pmap);
 	pve->pv_va = va;
 	pve->pv_flags = flags;
 
-	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
+	TAILQ_INSERT_HEAD(&m->md.pv_list, pve, pv_list);
 	if (pve->pv_flags & PVF_WIRED)
-		++pm->pm_stats.wired_count;
-	vm_page_aflag_set(pg, PGA_REFERENCED);
+		++pmap->pm_stats.wired_count;
 }
 
 /*
@@ -1005,13 +1004,13 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
  * => caller should hold lock on vm_page
  */
 static PMAP_INLINE struct pv_entry *
-pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_find_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
 {
 	struct pv_entry *pv;
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
-	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
-	    if (pm == PV_PMAP(pv) && va == pv->pv_va)
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
+	    if (pmap == PV_PMAP(pv) && va == pv->pv_va)
 		    break;
 	return (pv);
 }
@@ -1075,42 +1074,37 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
  */
 
 static void
-pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
+pmap_nuke_pv(struct vm_page *m, pmap_t pmap, struct pv_entry *pve)
 {
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
-	PMAP_ASSERT_LOCKED(pm);
+	PMAP_ASSERT_LOCKED(pmap);
 
-	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
+	TAILQ_REMOVE(&m->md.pv_list, pve, pv_list);
 
 	if (pve->pv_flags & PVF_WIRED)
-		--pm->pm_stats.wired_count;
+		--pmap->pm_stats.wired_count;
 
 	if (pve->pv_flags & PVF_WRITE) {
-		TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
+		TAILQ_FOREACH(pve, &m->md.pv_list, pv_list)
 		    if (pve->pv_flags & PVF_WRITE)
 			    break;
 		if (!pve) {
-			vm_page_aflag_clear(pg, PGA_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 		}
 	}
 }
 
 static struct pv_entry *
-pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_remove_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
 {
 	struct pv_entry *pve;
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
-	pve = TAILQ_FIRST(&pg->md.pv_list);
 
-	while (pve) {
-		if (PV_PMAP(pve) == pm && pve->pv_va == va) {	/* match? */
-			pmap_nuke_pv(pg, pm, pve);
-			break;
-		}
-		pve = TAILQ_NEXT(pve, pv_list);
-	}
+	pve = pmap_find_pv(m, pmap, va);	/* find corresponding pve */
+	if (pve != NULL)
+		pmap_nuke_pv(m, pmap, pve);
 
 	return(pve);				/* return removed pve */
 }
@@ -1126,15 +1120,15 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
  * Modify a physical-virtual mapping in the pv table
  */
 static u_int
-pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
+pmap_modify_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va,
     u_int clr_mask, u_int set_mask)
 {
 	struct pv_entry *npv;
 	u_int flags, oflags;
 
-	PMAP_ASSERT_LOCKED(pm);
+	PMAP_ASSERT_LOCKED(pmap);
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
-	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
+	if ((npv = pmap_find_pv(m, pmap, va)) == NULL)
 		return (0);
 
 	/*
@@ -1145,17 +1139,17 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
 
 	if ((flags ^ oflags) & PVF_WIRED) {
 		if (flags & PVF_WIRED)
-			++pm->pm_stats.wired_count;
+			++pmap->pm_stats.wired_count;
 		else
-			--pm->pm_stats.wired_count;
+			--pmap->pm_stats.wired_count;
 	}
 	if ((oflags & PVF_WRITE) && !(flags & PVF_WRITE)) {
-		TAILQ_FOREACH(npv, &pg->md.pv_list, pv_list) {
+		TAILQ_FOREACH(npv, &m->md.pv_list, pv_list) {
 			if (npv->pv_flags & PVF_WRITE)
 				break;
 		}
 		if (!npv)
-			vm_page_aflag_clear(pg, PGA_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 
 	return (oflags);
@@ -1176,8 +1170,6 @@ pmap_pinit0(struct pmap *pmap)
 {
 	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
 
-	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
-		(u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
 	bcopy(kernel_pmap, pmap, sizeof(*pmap));
 	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
 	PMAP_LOCK_INIT(pmap);
@@ -1283,7 +1275,7 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
 	"Page share factor per proc");
 
 int
-pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
+pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
 {
 	struct l2_dtable *l2;
 	struct l2_bucket *l2b;
@@ -1295,7 +1287,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 
 	l1idx = L1_IDX(va);
 	rw_wlock(&pvh_global_lock);
-	PMAP_LOCK(pm);
+	PMAP_LOCK(pmap);
 
 	/*
 	 * If there is no l2_dtable for this address, then the process
@@ -1304,7 +1296,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 	 * Note: This will catch userland processes trying to access
 	 * kernel addresses.
 	 */
-	l2 = pm->pm_l2[L2_IDX(l1idx)];
+	l2 = pmap->pm_l2[L2_IDX(l1idx)];
 	if (l2 == NULL)
 		goto out;
 
@@ -1333,7 +1325,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 
 	pa = l2pte_pa(pte);
 	CTR5(KTR_PMAP, "pmap_fault_fix: pmap:%p va:%x pte:0x%x ftype:%x user:%x",
-	    pm, va, pte, ftype, user);
+	    pmap, va, pte, ftype, user);
 	if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte)) &&
 	    L2_S_REFERENCED(pte)) {
 		/*
@@ -1341,15 +1333,15 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 		 * emulation...
 		 */
 		struct pv_entry *pv;
-		struct vm_page *pg;
+		struct vm_page *m;
 
 		/* Extract the physical address of the page */
-		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
+		if ((m = PHYS_TO_VM_PAGE(pa)) == NULL) {
 			goto out;
 		}
 		/* Get the current flags for this page. */
 
-		pv = pmap_find_pv(pg, pm, va);
+		pv = pmap_find_pv(m, pmap, va);
 		if (pv == NULL) {
 			goto out;
 		}
@@ -1365,7 +1357,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 			goto out;
 		}
 
-		vm_page_dirty(pg);
+		vm_page_dirty(m);
 		pv->pv_flags |= PVF_REF | PVF_MOD;
 
 		/* Re-enable write permissions for the page */
@@ -1379,17 +1371,17 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 		 * emulation.
 		 */
 		struct pv_entry *pv;
-		struct vm_page *pg;
+		struct vm_page *m;
 
 		/* Extract the physical address of the page */
-		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+		if ((m = PHYS_TO_VM_PAGE(pa)) == NULL)
 			goto out;
 		/* Get the current flags for this page. */
-		pv = pmap_find_pv(pg, pm, va);
+		pv = pmap_find_pv(m, pmap, va);
 		if (pv == NULL)
 			goto out;
 
-		vm_page_aflag_set(pg, PGA_REFERENCED);
+		vm_page_aflag_set(m, PGA_REFERENCED);
 		pv->pv_flags |= PVF_REF;
 
 		/* Mark the page "referenced" */
@@ -1402,8 +1394,8 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 	 * We know there is a valid mapping here, so simply
 	 * fix up the L1 if necessary.
 	 */
-	pl1pd = &pm->pm_l1->l1_kva[l1idx];
-	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
+	pl1pd = &pmap->pm_l1->l1_kva[l1idx];
+	l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
 	if (*pl1pd != l1pd) {
 		*pl1pd = l1pd;
 		PTE_SYNC(pl1pd);
@@ -1438,9 +1430,9 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 	 * that other parts of the pmap are not doing their job WRT managing
 	 * the TLB.
 	 */
-	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
-		printf("fixup: pm %p, va 0x%08x, ftype %d - nothing to do!\n",
-		    pm, va, ftype);
+	if (rv == 0 && pmap->pm_l1->l1_domain_use_count == 1) {
+		printf("fixup: pmap %p, va 0x%08x, ftype %d - nothing to do!\n",
+		    pmap, va, ftype);
 		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
 		    l2, l2b, ptep, pl1pd);
 		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
@@ -1458,7 +1450,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 
 out:
 	rw_wunlock(&pvh_global_lock);
-	PMAP_UNLOCK(pm);
+	PMAP_UNLOCK(pmap);
 	return (rv);
 }
 
@@ -1511,19 +1503,19 @@ pmap_postinit(void)
  * can be accessed quickly from cpu_switch() et al.
  */
 void
-pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
+pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
 {
 	struct l2_bucket *l2b;
 
-	pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
+	pcb->pcb_pagedir = pmap->pm_l1->l1_physaddr;
 	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
-	    (DOMAIN_CLIENT << (pm->pm_domain * 2));
+	    (DOMAIN_CLIENT << (pmap->pm_domain * 2));
 
 	if (vector_page < KERNBASE) {
-		pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
-		l2b = pmap_get_l2_bucket(pm, vector_page);
+		pcb->pcb_pl1vec = &pmap->pm_l1->l1_kva[L1_IDX(vector_page)];
+		l2b = pmap_get_l2_bucket(pmap, vector_page);
 		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
-		    L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
+		    L1_C_DOM(pmap->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
 	} else
 		pcb->pcb_pl1vec = NULL;
 }
@@ -1531,14 +1523,14 @@ pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
 void
 pmap_activate(struct thread *td)
 {
-	pmap_t pm;
+	pmap_t pmap;
 	struct pcb *pcb;
 
-	pm = vmspace_pmap(td->td_proc->p_vmspace);
+	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	pcb = td->td_pcb;
 
 	critical_enter();
-	pmap_set_pcb_pagedir(pm, pcb);
+	pmap_set_pcb_pagedir(pmap, pcb);
 
 	if (td == curthread) {
 		u_int cur_dacr, cur_ttb;
@@ -1871,12 +1863,12 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep;
 	vm_paddr_t pa;
-	struct vm_page *pg;
+	struct vm_page *m;
 
-	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
-	if (pg == NULL)
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	if (m == NULL)
 		return (1);
-	pa = VM_PAGE_TO_PHYS(pg);
+	pa = VM_PAGE_TO_PHYS(m);
 
 	if (pap)
 		*pap = pa;
@@ -1896,7 +1888,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
  * used by pmap_growkernel().
  */
 static __inline struct l2_bucket *
-pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_grow_l2_bucket(pmap_t pmap, vm_offset_t va)
 {
 	struct l2_dtable *l2;
 	struct l2_bucket *l2b;
@@ -1907,7 +1899,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
 
 	l1idx = L1_IDX(va);
 
-	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 		/*
 		 * No mapping at this address, as there is
 		 * no entry in the L1 table.
@@ -1940,7 +1932,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
 		/*
 		 * Link it into the parent pmap
 		 */
-		pm->pm_l2[L2_IDX(l1idx)] = l2;
+		pmap->pm_l2[L2_IDX(l1idx)] = l2;
 		memset(l2, 0, sizeof(*l2));
 	}
 
@@ -1994,7 +1986,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
 void
 pmap_growkernel(vm_offset_t addr)
 {
-	pmap_t kpm = pmap_kernel();
+	pmap_t kpmap = pmap_kernel();
 
 	if (addr <= pmap_curmaxkvaddr)
 		return;		/* we are OK */
@@ -2005,7 +1997,7 @@ pmap_growkernel(vm_offset_t addr)
 
 	/* Map 1MB at a time */
 	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
-		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
+		pmap_grow_l2_bucket(kpmap, pmap_curmaxkvaddr);
 
 	/*
 	 * flush out the cache, expensive but growkernel will happen so
@@ -2033,7 +2025,7 @@ pmap_remove_pages(pmap_t pmap)
 	struct pv_entry *pv;
  	struct l2_bucket *l2b = NULL;
  	vm_page_t m;
- 	pt_entry_t *pt;
+ 	pt_entry_t *ptep;
 	struct pv_chunk *pc, *npc;
 	uint32_t inuse, bitmask;
 	int allfree, bit, field, idx;
@@ -2057,12 +2049,15 @@ pmap_remove_pages(pmap_t pmap)
 					continue;
 				}
 				l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
-				KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
-				pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-				m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
-				KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
-				*pt = 0;
-				PTE_SYNC(pt);
+				KASSERT(l2b != NULL,
+				    ("No L2 bucket in pmap_remove_pages"));
+				ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+				m = PHYS_TO_VM_PAGE(*ptep & L2_ADDR_MASK);
+				KASSERT((vm_offset_t)m >= KERNBASE,
+				    ("Trying to access non-existent page "
+				     "va %x pte %x", pv->pv_va, *ptep));
+				*ptep = 0;
+				PTE_SYNC(ptep);
 
 				/* Mark free */
 				PV_STAT(pv_entry_frees++);
@@ -2168,7 +2163,7 @@ static PMAP_INLINE void
 pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 {
 	struct l2_bucket *l2b;
-	pt_entry_t *pte;
+	pt_entry_t *ptep;
 	pt_entry_t opte;
 
 	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
@@ -2180,8 +2175,8 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 		l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
 	KASSERT(l2b != NULL, ("No L2 Bucket"));
 
-	pte = &l2b->l2b_kva[l2pte_index(va)];
-	opte = *pte;
+	ptep = &l2b->l2b_kva[l2pte_index(va)];
+	opte = *ptep;
 	if (l2pte_valid(opte)) {
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
@@ -2191,18 +2186,18 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 	}
 
 	if (flags & KENTER_CACHE) {
-		*pte = L2_S_PROTO | pa | pte_l2_s_cache_mode | L2_S_REF;
-		pmap_set_prot(pte, VM_PROT_READ | VM_PROT_WRITE,
+		*ptep = L2_S_PROTO | pa | pte_l2_s_cache_mode | L2_S_REF;
+		pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE,
 		    flags & KENTER_USER);
 	} else {
-		*pte = L2_S_PROTO | pa | L2_S_REF;
-		pmap_set_prot(pte, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
+		*ptep = L2_S_PROTO | pa | L2_S_REF;
+		pmap_set_prot(ptep, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
 		    0);
 	}
 
 	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
-	    (uint32_t) pte, opte, *pte));
-	PTE_SYNC(pte);
+	    (uint32_t) ptep, opte, *ptep));
+	PTE_SYNC(ptep);
 	cpu_cpwait();
 }
 
@@ -2246,20 +2241,20 @@ void
 pmap_kremove(vm_offset_t va)
 {
 	struct l2_bucket *l2b;
-	pt_entry_t *pte, opte;
+	pt_entry_t *ptep, opte;
 
 	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 	if (!l2b)
 		return;
 	KASSERT(l2b != NULL, ("No L2 Bucket"));
-	pte = &l2b->l2b_kva[l2pte_index(va)];
-	opte = *pte;
+	ptep = &l2b->l2b_kva[l2pte_index(va)];
+	opte = *ptep;
 	if (l2pte_valid(opte)) {
 		va = va & ~PAGE_MASK;
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
-		*pte = 0;
-		PTE_SYNC(pte);
+		*ptep = 0;
+		PTE_SYNC(ptep);
 	}
 }
 
@@ -2359,13 +2354,13 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 boolean_t
 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 {
-	pd_entry_t *pde;
-	pt_entry_t *pte;
+	pd_entry_t *pdep;
+	pt_entry_t *ptep;
 
-	if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
+	if (!pmap_get_pde_pte(pmap, addr, &pdep, &ptep))
 		return (FALSE);
-	KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
-	if (*pte == 0)
+	KASSERT(ptep != NULL, ("Valid mapping but no pte ?"));
+	if (*ptep == 0)
 		return (TRUE);
 	return (FALSE);
 }
@@ -2386,18 +2381,19 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
  * a "section" mapping.
  */
 boolean_t
-pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
+pmap_get_pde_pte(pmap_t pmap, vm_offset_t va, pd_entry_t **pdp,
+    pt_entry_t **ptp)
 {
 	struct l2_dtable *l2;
 	pd_entry_t *pl1pd, l1pd;
 	pt_entry_t *ptep;
 	u_short l1idx;
 
-	if (pm->pm_l1 == NULL)
+	if (pmap->pm_l1 == NULL)
 		return (FALSE);
 
 	l1idx = L1_IDX(va);
-	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
+	*pdp = pl1pd = &pmap->pm_l1->l1_kva[l1idx];
 	l1pd = *pl1pd;
 
 	if (l1pte_section_p(l1pd)) {
@@ -2405,10 +2401,10 @@ pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
 		return (TRUE);
 	}
 
-	if (pm->pm_l2 == NULL)
+	if (pmap->pm_l2 == NULL)
 		return (FALSE);
 
-	l2 = pm->pm_l2[L2_IDX(l1idx)];
+	l2 = pmap->pm_l2[L2_IDX(l1idx)];
 
 	if (l2 == NULL ||
 	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
@@ -2439,7 +2435,7 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *ptep;
 	struct l2_bucket *l2b;
 	boolean_t flush = FALSE;
-	pmap_t curpm;
+	pmap_t curpmap;
 	int flags = 0;
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -2448,10 +2444,10 @@ pmap_remove_all(vm_page_t m)
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
 	rw_wlock(&pvh_global_lock);
-	curpm = vmspace_pmap(curproc->p_vmspace);
+	curpmap = vmspace_pmap(curproc->p_vmspace);
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
-		if (flush == FALSE && (pmap == curpm ||
+		if (flush == FALSE && (pmap == curpmap ||
 		    pmap == pmap_kernel()))
 			flush = TRUE;
 
@@ -2544,7 +2540,7 @@ pmap_change_attr(vm_offset_t sva, vm_size_t len, int mode)
  *	specified range of this map as requested.
  */
 void
-pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep, pte;
@@ -2553,7 +2549,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	int flush;
 
 	if ((prot & VM_PROT_READ) == 0) {
-		pmap_remove(pm, sva, eva);
+		pmap_remove(pmap, sva, eva);
 		return;
 	}
 
@@ -2566,7 +2562,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	}
 
 	rw_wlock(&pvh_global_lock);
-	PMAP_LOCK(pm);
+	PMAP_LOCK(pmap);
 
 	/*
 	 * OK, at this point, we know we're doing write-protect operation.
@@ -2581,7 +2577,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		if (next_bucket > eva)
 			next_bucket = eva;
 
-		l2b = pmap_get_l2_bucket(pm, sva);
+		l2b = pmap_get_l2_bucket(pmap, sva);
 		if (l2b == NULL) {
 			sva = next_bucket;
 			continue;
@@ -2591,14 +2587,15 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 
 		while (sva < next_bucket) {
 			if ((pte = *ptep) != 0 && L2_S_WRITABLE(pte)) {
-				struct vm_page *pg;
+				struct vm_page *m;
 				u_int f;
 
-				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
-				pmap_set_prot(ptep, prot, !(pm == pmap_kernel()));
+				m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
+				pmap_set_prot(ptep, prot,
+				    !(pmap == pmap_kernel()));
 				PTE_SYNC(ptep);
 
-				f = pmap_modify_pv(pg, pm, sva,
+				f = pmap_modify_pv(m, pmap, sva,
 				    PVF_WRITE, 0);
 
 				if (flush >= 0) {
@@ -2627,7 +2624,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	}
 	rw_wunlock(&pvh_global_lock);
 
-	PMAP_UNLOCK(pm);
+	PMAP_UNLOCK(pmap);
 }
 
 
@@ -2664,7 +2661,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     vm_prot_t prot, boolean_t wired, int flags)
 {
 	struct l2_bucket *l2b = NULL;
-	struct vm_page *opg;
+	struct vm_page *om;
 	struct pv_entry *pve = NULL;
 	pt_entry_t *ptep, npte, opte;
 	u_int nflags;
@@ -2700,8 +2697,9 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	if (wired)
 		nflags |= PVF_WIRED;
 
-	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
-	    "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
+	    "prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
+	    prot, wired));
 
 	if (pmap == pmap_kernel()) {
 		l2b = pmap_get_l2_bucket(pmap, va);
@@ -2735,11 +2733,11 @@ do_l2b_alloc:
 		 * vm_page.
 		 */
 		if (l2pte_pa(opte) != pa)
-			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
+			om = PHYS_TO_VM_PAGE(l2pte_pa(opte));
 		else
-			opg = m;
+			om = m;
 	} else
-		opg = NULL;
+		om = NULL;
 
 	if ((prot & (VM_PROT_ALL)) || !m) {
 		/*
@@ -2794,10 +2792,10 @@ do_l2b_alloc:
 	if (!(prot & VM_PROT_EXECUTE) && m)
 		npte |= L2_XN;
 
-	if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+	if (m && (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE))
 		npte |= pte_l2_s_cache_mode;
 
-	if (m && m == opg) {
+	if (m && m == om) {
 		/*
 		 * We're changing the attrs of an existing mapping.
 		 */
@@ -2809,13 +2807,13 @@ do_l2b_alloc:
 		 * New mapping, or changing the backing page
 		 * of an existing mapping.
 		 */
-		if (opg) {
+		if (om) {
 			/*
 			 * Replacing an existing mapping with a new one.
 			 * It is part of our managed memory so we
 			 * must remove it from the PV list
 			 */
-			if ((pve = pmap_remove_pv(opg, pmap, va))) {
+			if ((pve = pmap_remove_pv(om, pmap, va))) {
 			    oflags = pve->pv_flags;
 
 			    if (m && ((m->oflags & VPO_UNMANAGED)))
@@ -2824,7 +2822,8 @@ do_l2b_alloc:
 		}
 
 		if ((m && !(m->oflags & VPO_UNMANAGED))) {
-			if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
+			if ((!pve) &&
+			    (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
 				panic("pmap_enter: no pv entries");
 
 			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
@@ -2869,8 +2868,8 @@ do_l2b_alloc:
 				pd_entry_t *pl1pd, l1pd;
 
 				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
-				l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
-				    L1_C_PROTO;
+				l1pd = l2b->l2b_phys |
+				    L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
 				if (*pl1pd != l1pd) {
 					*pl1pd = l1pd;
 					PTE_SYNC(pl1pd);
@@ -2956,7 +2955,7 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep, pte;
-	vm_page_t pg;
+	vm_page_t m;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
@@ -2964,9 +2963,9 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 	KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
 	pte = *ptep;
-	pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
-	if (pg)
-		pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired);
+	m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
+	if (m != NULL)
+		pmap_modify_pv(m, pmap, va, PVF_WIRED, wired);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -3177,7 +3176,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 	struct pv_chunk *pc;
 	struct l2_bucket *l2b = NULL;
 	pmap_t pmap;
-	pt_entry_t *pt;
+	pt_entry_t *ptep;
 	pv_entry_t pv;
 	vm_offset_t va;
 	vm_page_t free, m, m_pc;
@@ -3225,13 +3224,13 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 				va = pv->pv_va;
 				l2b = pmap_get_l2_bucket(pmap, va);
 				KASSERT(l2b != NULL, ("No l2 bucket"));
-				pt = &l2b->l2b_kva[l2pte_index(va)];
-				m = PHYS_TO_VM_PAGE(l2pte_pa(*pt));
+				ptep = &l2b->l2b_kva[l2pte_index(va)];
+				m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
 				KASSERT((vm_offset_t)m >= KERNBASE,
 				    ("Trying to access non-existent page "
-				     "va %x pte %x in %s", va, *pt));
-				*pt = 0;
-				PTE_SYNC(pt);
+				     "va %x pte %x in %s", va, *ptep));
+				*ptep = 0;
+				PTE_SYNC(ptep);
 				pmap_nuke_pv(m, pmap, pv);
 				pc->pc_map[field] |= 1UL << bit;
 				freed++;
@@ -3422,7 +3421,7 @@ retry:
  */
 #define	PMAP_REMOVE_CLEAN_LIST_SIZE	3
 void
-pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
 	struct l2_bucket *l2b;
 	vm_offset_t next_bucket;
@@ -3437,7 +3436,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 	 */
 
 	rw_wlock(&pvh_global_lock);
-	PMAP_LOCK(pm);
+	PMAP_LOCK(pmap);
 	total = 0;
 	while (sva < eva) {
 		/*
@@ -3447,7 +3446,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 		if (next_bucket > eva)
 			next_bucket = eva;
 
-		l2b = pmap_get_l2_bucket(pm, sva);
+		l2b = pmap_get_l2_bucket(pmap, sva);
 		if (l2b == NULL) {
 			sva = next_bucket;
 			continue;
@@ -3457,7 +3456,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 		mappings = 0;
 
 		while (sva < next_bucket) {
-			struct vm_page *pg;
+			struct vm_page *m;
 			pt_entry_t pte;
 			vm_paddr_t pa;
 
@@ -3472,7 +3471,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 				continue;
 			}
 
-			pm->pm_stats.resident_count--;
+			pmap->pm_stats.resident_count--;
 			pa = l2pte_pa(pte);
 			is_exec = 0;
 			is_refd = 1;
@@ -3482,27 +3481,26 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 			 * we could cluster a lot of these and do a
 			 * number of sequential pages in one go.
 			 */
-			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+			if ((m = PHYS_TO_VM_PAGE(pa)) != NULL) {
 				struct pv_entry *pve;
 
-				pve = pmap_remove_pv(pg, pm, sva);
+				pve = pmap_remove_pv(m, pmap, sva);
 				if (pve) {
 					is_exec = PV_BEEN_EXECD(pve->pv_flags);
 					is_refd = PV_BEEN_REFD(pve->pv_flags);
-					pmap_free_pv_entry(pm, pve);
+					pmap_free_pv_entry(pmap, pve);
 				}
 			}
 
-			if (pmap_is_current(pm)) {
+			if (pmap_is_current(pmap)) {
 				total++;
 				if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
 					if (is_exec)
 						cpu_tlb_flushID_SE(sva);
 					else if (is_refd)
 						cpu_tlb_flushD_SE(sva);
-				} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
+				} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE)
 					flushall = 1;
-				}
 			}
 			*ptep = 0;
 			PTE_SYNC(ptep);
@@ -3512,13 +3510,13 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 			mappings++;
 		}
 
-		pmap_free_l2_bucket(pm, l2b, mappings);
+		pmap_free_l2_bucket(pmap, l2b, mappings);
 	}
 
 	rw_wunlock(&pvh_global_lock);
 	if (flushall)
 		cpu_tlb_flushID();
-	PMAP_UNLOCK(pm);
+	PMAP_UNLOCK(pmap);
 }
 
 /*
@@ -3530,11 +3528,11 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
  * _any_ bulk data very slow.
  */
 static void
-pmap_zero_page_gen(vm_page_t pg, int off, int size)
+pmap_zero_page_gen(vm_page_t m, int off, int size)
 {
 
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(pg);
-	if (!TAILQ_EMPTY(&pg->md.pv_list))
+	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
+	if (!TAILQ_EMPTY(&m->md.pv_list))
 		panic("pmap_zero_page: page has mappings");
 
 	mtx_lock(&cmtx);
@@ -3779,7 +3777,7 @@ pmap_is_referenced(vm_page_t m)
 {
 	struct l2_bucket *l2b;
 	pv_entry_t pv;
-	pt_entry_t *pte;
+	pt_entry_t *ptep;
 	pmap_t pmap;
 	boolean_t rv;
 
@@ -3791,8 +3789,8 @@ pmap_is_referenced(vm_page_t m)
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
-		pte = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-		rv = L2_S_REFERENCED(*pte);
+		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		rv = L2_S_REFERENCED(*ptep);
 		PMAP_UNLOCK(pmap);
 		if (rv)
 			break;
@@ -3821,7 +3819,7 @@ pmap_is_modified(vm_page_t m)
 {
 	struct l2_bucket *l2b;
 	pv_entry_t pv;
-	pt_entry_t *pte;
+	pt_entry_t *ptep;
 	pmap_t pmap;
 	boolean_t rv;
 
@@ -3842,8 +3840,8 @@ pmap_is_modified(vm_page_t m)
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
-		pte = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-		rv = (L2_S_WRITABLE(*pte));
+		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		rv = (L2_S_WRITABLE(*ptep));
 		PMAP_UNLOCK(pmap);
 		if (rv)
 			break;
@@ -3966,7 +3964,7 @@ out:
 }
 
 void
-pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {
 }
 
@@ -4067,7 +4065,7 @@ pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
 {
 	pd_entry_t *pde = (pd_entry_t *) l1pt;
 	pt_entry_t fl;
-	pt_entry_t *pte;
+	pt_entry_t *ptep;
 
 	KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin"));
 
@@ -4076,14 +4074,14 @@ pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
 
-	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+	ptep = (pt_entry_t *)kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 
-	if (pte == NULL)
+	if (ptep == NULL)
 		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
 
-	pte[l2pte_index(va)] = L2_S_PROTO | pa | fl | L2_S_REF;
-	pmap_set_prot(&pte[l2pte_index(va)], prot, 0);
-	PTE_SYNC(&pte[l2pte_index(va)]);
+	ptep[l2pte_index(va)] = L2_S_PROTO | pa | fl | L2_S_REF;
+	pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
+	PTE_SYNC(&ptep[l2pte_index(va)]);
 }
 
 /*
@@ -4098,7 +4096,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
     vm_size_t size, int prot, int type)
 {
 	pd_entry_t *pde = (pd_entry_t *) l1pt;
-	pt_entry_t *pte, f1, f2s, f2l;
+	pt_entry_t *ptep, f1, f2s, f2l;
 	vm_size_t resid;
 	int i;
 
@@ -4142,9 +4140,9 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
 
-		pte = (pt_entry_t *) kernel_pt_lookup(
+		ptep = (pt_entry_t *) kernel_pt_lookup(
 		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
-		if (pte == NULL)
+		if (ptep == NULL)
 			panic("pmap_map_chunk: can't find L2 table for VA"
 			    "0x%08x", va);
 		/* See if we can use a L2 large page mapping. */
@@ -4153,10 +4151,10 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 			printf("L");
 #endif
 			for (i = 0; i < 16; i++) {
-				pte[l2pte_index(va) + i] =
+				ptep[l2pte_index(va) + i] =
 				    L2_L_PROTO | pa |
 				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
-				PTE_SYNC(&pte[l2pte_index(va) + i]);
+				PTE_SYNC(&ptep[l2pte_index(va) + i]);
 			}
 			va += L2_L_SIZE;
 			pa += L2_L_SIZE;
@@ -4168,9 +4166,9 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 #ifdef VERBOSE_INIT_ARM
 		printf("P");
 #endif
-		pte[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
-		pmap_set_prot(&pte[l2pte_index(va)], prot, 0);
-		PTE_SYNC(&pte[l2pte_index(va)]);
+		ptep[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
+		pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
+		PTE_SYNC(&ptep[l2pte_index(va)]);
 		va += PAGE_SIZE;
 		pa += PAGE_SIZE;
 		resid -= PAGE_SIZE;
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 0875f83..c994239 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -1798,8 +1798,6 @@ pmap_pinit0(struct pmap *pmap)
 {
 	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
 
-	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
-		(u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
 	bcopy(kernel_pmap, pmap, sizeof(*pmap));
 	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
 	PMAP_LOCK_INIT(pmap);
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index a9dffa2..08b4396 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -391,22 +391,21 @@ data_abort_handler(trapframe_t *tf)
 	 * Otherwise we need to disassemble the instruction
 	 * responsible to determine if it was a write.
 	 */
-	if (IS_PERMISSION_FAULT(fsr)) {
+	if (IS_PERMISSION_FAULT(fsr))
 		ftype = VM_PROT_WRITE;
-	} else {
+	else {
 		u_int insn = ReadWord(tf->tf_pc);
 
 		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
 		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
-		    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
-		{
+		    ((insn & 0x0a100000) == 0x08000000)) {	/* STM/CDT */
 			ftype = VM_PROT_WRITE;
-	}
-		else
-		if ((insn & 0x0fb00ff0) == 0x01000090)		/* SWP */
-			ftype = VM_PROT_READ | VM_PROT_WRITE;
-		else
-			ftype = VM_PROT_READ;
+		} else {
+			if ((insn & 0x0fb00ff0) == 0x01000090)	/* SWP */
+				ftype = VM_PROT_READ | VM_PROT_WRITE;
+			else
+				ftype = VM_PROT_READ;
+		}
 	}
 
 	/*
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index e91fce7..445b43f 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -150,7 +150,6 @@ struct	pmap {
 	u_int8_t		pm_domain;
 	struct l1_ttable	*pm_l1;
 	struct l2_dtable	*pm_l2[L2_SIZE];
-	pd_entry_t		*pm_pdir;	/* KVA of page directory */
 	cpuset_t		pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statictics */
 #if (ARM_MMU_V6 + ARM_MMU_V7) != 0
-- 
1.8.2


--------------070804070705080607020805
Content-Type: text/x-patch;
 name="0003-arm-Stop-using-PVF_MOD-PVF_REF-PVF_EXEC-flags-in-pv_.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
 filename*0="0003-arm-Stop-using-PVF_MOD-PVF_REF-PVF_EXEC-flags-in-pv_.pa";
 filename*1="tch"

>From 5c9420e208e520cb5c0653b196ecede760fc28fb Mon Sep 17 00:00:00 2001
From: Zbigniew Bodek <zbb@semihalf.com>
Date: Sat, 27 Apr 2013 18:12:28 +0200
Subject: [PATCH 3/5] arm: Stop using PVF_MOD, PVF_REF & PVF_EXEC flags in
 pv_entry. Use PTE instead.

Using PVF_MOD, PVF_REF and PVF_EXEC is redundant, as the same information can
be derived directly from the PTE bits.
When a mapping is marked executable and has been referenced, we assume that it
has been executed. Similarly, when a mapping is writable and has been
referenced, the reference must have resulted from a write access to it.
The PVF_MOD and PVF_REF flags are kept only for pmap_clearbit(), to pass the
information about which bit should be cleared.
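
For illustration, here is a minimal stand-alone sketch of how "executed" and
"modified" can be inferred from the PTE alone. This is not the kernel code;
the PTE_* constants are made-up stand-ins for the L2_XN, L2_S_REF (AP[0]) and
L2_APX definitions in pmap.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

/* Stand-ins for the L2_XN, L2_S_REF (AP[0]) and L2_APX bits from pmap.h. */
#define PTE_XN  (1u << 0)   /* execute never */
#define PTE_REF (1u << 4)   /* AP[0]: reference flag */
#define PTE_APX (1u << 9)   /* AP[2]: write disable */

static bool
pte_been_execd(pt_entry_t pte)
{
	/* Executable and referenced: assume it has been executed. */
	return ((pte & PTE_XN) == 0 && (pte & PTE_REF) != 0);
}

static bool
pte_been_modified(pt_entry_t pte)
{
	/* Writable and referenced: the reference must have been a write. */
	return ((pte & PTE_APX) == 0 && (pte & PTE_REF) != 0);
}

int
main(void)
{
	pt_entry_t pte = PTE_REF;	/* referenced, writable, executable */

	printf("execd=%d modified=%d\n",
	    pte_been_execd(pte), pte_been_modified(pte));
	return (0);
}
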
---
 sys/arm/arm/pmap-v6.c  | 68 ++++++++++++++++++++++++--------------------------
 sys/arm/include/pmap.h |  1 +
 2 files changed, 33 insertions(+), 36 deletions(-)

diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 723c76d..afbaa8f 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -383,13 +383,13 @@ int	pmap_needs_pte_sync;
  * Macro to determine if a mapping might be resident in the
  * instruction cache and/or TLB
  */
-#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+#define	PTE_BEEN_EXECD(pte)  (L2_S_EXECUTABLE(pte) && L2_S_REFERENCED(pte))
 
 /*
  * Macro to determine if a mapping might be resident in the
  * data cache and/or TLB
  */
-#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
+#define	PTE_BEEN_REFD(pte)   (L2_S_REFERENCED(pte))
 
 #ifndef PMAP_SHPGPERPROC
 #define PMAP_SHPGPERPROC 200
@@ -947,9 +947,9 @@ pmap_clearbit(struct vm_page *m, u_int maskbits)
 			*ptep = npte;
 			PTE_SYNC(ptep);
 			/* Flush the TLB entry if a current pmap. */
-			if (PV_BEEN_EXECD(oflags))
+			if (PTE_BEEN_EXECD(opte))
 				cpu_tlb_flushID_SE(pv->pv_va);
-			else if (PV_BEEN_REFD(oflags))
+			else if (PTE_BEEN_REFD(opte))
 				cpu_tlb_flushD_SE(pv->pv_va);
 		}
 
@@ -1358,7 +1358,6 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
 		}
 
 		vm_page_dirty(m);
-		pv->pv_flags |= PVF_REF | PVF_MOD;
 
 		/* Re-enable write permissions for the page */
 		pmap_set_prot(ptep, VM_PROT_WRITE, *ptep & L2_S_PROT_U);
@@ -1382,7 +1381,6 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
 			goto out;
 
 		vm_page_aflag_set(m, PGA_REFERENCED);
-		pv->pv_flags |= PVF_REF;
 
 		/* Mark the page "referenced" */
 		*ptep = pte | L2_S_REF;
@@ -2436,7 +2434,7 @@ pmap_remove_all(vm_page_t m)
 	struct l2_bucket *l2b;
 	boolean_t flush = FALSE;
 	pmap_t curpmap;
-	int flags = 0;
+	u_int is_exec = 0;
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
@@ -2455,19 +2453,19 @@ pmap_remove_all(vm_page_t m)
 		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 		KASSERT(l2b != NULL, ("No l2 bucket"));
 		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		is_exec |= PTE_BEEN_EXECD(*ptep);
 		*ptep = 0;
 		if (pmap_is_current(pmap))
 			PTE_SYNC(ptep);
 		pmap_free_l2_bucket(pmap, l2b, 1);
 		pmap->pm_stats.resident_count--;
-		flags |= pv->pv_flags;
 		pmap_nuke_pv(m, pmap, pv);
 		pmap_free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
 
 	if (flush) {
-		if (PV_BEEN_EXECD(flags))
+		if (is_exec)
 			cpu_tlb_flushID();
 		else
 			cpu_tlb_flushD();
@@ -2545,7 +2543,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep, pte;
 	vm_offset_t next_bucket;
-	u_int flags;
+	u_int is_exec, is_refd;
 	int flush;
 
 	if ((prot & VM_PROT_READ) == 0) {
@@ -2570,7 +2568,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	 */
 
 	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
-	flags = 0;
+	is_exec = is_refd = 0;
 
 	while (sva < eva) {
 		next_bucket = L2_NEXT_BUCKET(sva);
@@ -2588,25 +2586,24 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		while (sva < next_bucket) {
 			if ((pte = *ptep) != 0 && L2_S_WRITABLE(pte)) {
 				struct vm_page *m;
-				u_int f;
 
 				m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
 				pmap_set_prot(ptep, prot,
 				    !(pmap == pmap_kernel()));
 				PTE_SYNC(ptep);
 
-				f = pmap_modify_pv(m, pmap, sva,
-				    PVF_WRITE, 0);
+				pmap_modify_pv(m, pmap, sva, PVF_WRITE, 0);
 
 				if (flush >= 0) {
 					flush++;
-					flags |= f;
-				} else
-				if (PV_BEEN_EXECD(f))
-					cpu_tlb_flushID_SE(sva);
-				else
-				if (PV_BEEN_REFD(f))
-					cpu_tlb_flushD_SE(sva);
+					is_exec |= PTE_BEEN_EXECD(pte);
+					is_refd |= PTE_BEEN_REFD(pte);
+				} else {
+					if (PTE_BEEN_EXECD(pte))
+						cpu_tlb_flushID_SE(sva);
+					else if (PTE_BEEN_REFD(pte))
+						cpu_tlb_flushD_SE(sva);
+				}
 			}
 
 			sva += PAGE_SIZE;
@@ -2616,10 +2613,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 
 
 	if (flush) {
-		if (PV_BEEN_EXECD(flags))
+		if (is_exec)
 			cpu_tlb_flushID();
 		else
-		if (PV_BEEN_REFD(flags))
+		if (is_refd)
 			cpu_tlb_flushD();
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -2665,7 +2662,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	struct pv_entry *pve = NULL;
 	pt_entry_t *ptep, npte, opte;
 	u_int nflags;
-	u_int oflags;
+	u_int is_exec, is_refd;
 	vm_paddr_t pa;
 	u_char user;
 
@@ -2692,8 +2689,6 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 
 	if (prot & VM_PROT_WRITE)
 		nflags |= PVF_WRITE;
-	if (prot & VM_PROT_EXECUTE)
-		nflags |= PVF_EXEC;
 	if (wired)
 		nflags |= PVF_WIRED;
 
@@ -2725,7 +2720,8 @@ do_l2b_alloc:
 
 	opte = *ptep;
 	npte = pa;
-	oflags = 0;
+	is_exec = is_refd = 0;
+
 	if (opte) {
 		/*
 		 * There is already a mapping at this address.
@@ -2748,7 +2744,6 @@ do_l2b_alloc:
 		 *   so no need to re-do referenced emulation here.
 		 */
 		npte |= L2_S_REF;
-		nflags |= PVF_REF;
 
 		if (m != NULL &&
 		    (m->oflags & VPO_UNMANAGED) == 0)
@@ -2799,9 +2794,9 @@ do_l2b_alloc:
 		/*
 		 * We're changing the attrs of an existing mapping.
 		 */
-		oflags = pmap_modify_pv(m, pmap, va,
-		    PVF_WRITE | PVF_EXEC | PVF_WIRED |
-		    PVF_MOD | PVF_REF, nflags);
+		pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_WIRED, nflags);
+		is_exec |= PTE_BEEN_EXECD(opte);
+		is_refd |= PTE_BEEN_REFD(opte);
 	} else {
 		/*
 		 * New mapping, or changing the backing page
@@ -2814,7 +2809,8 @@ do_l2b_alloc:
 			 * must remove it from the PV list
 			 */
 			if ((pve = pmap_remove_pv(om, pmap, va))) {
-			    oflags = pve->pv_flags;
+				is_exec |= PTE_BEEN_EXECD(opte);
+				is_refd |= PTE_BEEN_REFD(opte);
 
 			    if (m && ((m->oflags & VPO_UNMANAGED)))
 				pmap_free_pv_entry(pmap, pve);
@@ -2877,9 +2873,9 @@ do_l2b_alloc:
 			}
 		}
 
-		if (PV_BEEN_EXECD(oflags))
+		if (is_exec)
 			cpu_tlb_flushID_SE(va);
-		else if (PV_BEEN_REFD(oflags))
+		else if (is_refd)
 			cpu_tlb_flushD_SE(va);
 	}
 
@@ -3486,8 +3482,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 
 				pve = pmap_remove_pv(m, pmap, sva);
 				if (pve) {
-					is_exec = PV_BEEN_EXECD(pve->pv_flags);
-					is_refd = PV_BEEN_REFD(pve->pv_flags);
+					is_exec = PTE_BEEN_EXECD(pte);
+					is_refd = PTE_BEEN_REFD(pte);
 					pmap_free_pv_entry(pmap, pve);
 				}
 			}
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index 445b43f..ec40682 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -391,6 +391,7 @@ extern int pmap_needs_pte_sync;
 #define L2_S_REF		(L2_AP0(1))	/* reference flag */
 
 #define	L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R)
+#define	L2_S_EXECUTABLE(pte)	(!(pte & L2_XN))
 #define	L2_S_WRITABLE(pte)	(!(pte & L2_APX))
 #define	L2_S_REFERENCED(pte)	(!!(pte & L2_S_REF))
 
-- 
1.8.2


--------------070804070705080607020805
Content-Type: text/x-patch;
 name="0004-arm-Get-rid-of-VERBOSE_INIT_ARM-option.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
 filename="0004-arm-Get-rid-of-VERBOSE_INIT_ARM-option.patch"

>From 9f460d65c20f74b4763ee9bfc0f157b4474c471e Mon Sep 17 00:00:00 2001
From: Zbigniew Bodek <zbb@semihalf.com>
Date: Thu, 25 Apr 2013 15:37:07 +0200
Subject: [PATCH 4/5] arm: Get rid of VERBOSE_INIT_ARM option

This "NetBSDism" won't actually work on FreeBSD as the console is being
initialized after pmap preliminary setup.
On the other hand, drivers that were using VERBOSE_INIT_ARM for debug
purposes are not part of or are not called in initarm() so the option
is misused. In such cases exchange VERBOSE_INIT_ARM to debug printf
(DPRINTF) and delete all other useless VERBOSE_INIT_ARM ifdefs.
---
 sys/arm/arm/pmap-v6.c              | 28 ----------------------------
 sys/arm/arm/pmap.c                 | 28 ----------------------------
 sys/arm/conf/BWCT                  |  2 --
 sys/arm/conf/CAMBRIA               |  1 -
 sys/arm/conf/EP80219               |  1 -
 sys/arm/xscale/i80321/i80321_mcu.c |  9 ---------
 sys/arm/xscale/i80321/iq80321.c    | 21 +++++++++++----------
 sys/conf/options.arm               |  1 -
 8 files changed, 11 insertions(+), 80 deletions(-)

diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index afbaa8f..e937152 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -4039,10 +4039,6 @@ pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
 
 	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
 
-#ifdef VERBOSE_INIT_ARM
-	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
-#endif
-
 	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
 	PTE_SYNC(&pde[slot]);
 
@@ -4101,11 +4097,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 	if (l1pt == 0)
 		panic("pmap_map_chunk: no L1 table provided");
 
-#ifdef VERBOSE_INIT_ARM
-	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
-	    "prot=0x%x type=%d\n", pa, va, size, resid, prot, type);
-#endif
-
 	f1 = l1_mem_types[type];
 	f2l = l2l_mem_types[type];
 	f2s = l2s_mem_types[type];
@@ -4115,9 +4106,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 	while (resid > 0) {
 		/* See if we can use a section mapping. */
 		if (L1_S_MAPPABLE_P(va, pa, resid)) {
-#ifdef VERBOSE_INIT_ARM
-			printf("S");
-#endif
 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
 			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
@@ -4143,9 +4131,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 			    "0x%08x", va);
 		/* See if we can use a L2 large page mapping. */
 		if (L2_L_MAPPABLE_P(va, pa, resid)) {
-#ifdef VERBOSE_INIT_ARM
-			printf("L");
-#endif
 			for (i = 0; i < 16; i++) {
 				ptep[l2pte_index(va) + i] =
 				    L2_L_PROTO | pa |
@@ -4159,9 +4144,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 		}
 
 		/* Use a small page mapping. */
-#ifdef VERBOSE_INIT_ARM
-		printf("P");
-#endif
 		ptep[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
 		pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
 		PTE_SYNC(&ptep[l2pte_index(va)]);
@@ -4169,9 +4151,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 		pa += PAGE_SIZE;
 		resid -= PAGE_SIZE;
 	}
-#ifdef VERBOSE_INIT_ARM
-	printf("\n");
-#endif
 	return (size);
 
 }
@@ -4205,13 +4184,6 @@ pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
 	pmap_devmap_table = table;
 
 	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
-#ifdef VERBOSE_INIT_ARM
-		printf("devmap: %08x -> %08x @ %08x\n",
-		    pmap_devmap_table[i].pd_pa,
-		    pmap_devmap_table[i].pd_pa +
-			pmap_devmap_table[i].pd_size - 1,
-		    pmap_devmap_table[i].pd_va);
-#endif
 		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
 		    pmap_devmap_table[i].pd_pa,
 		    pmap_devmap_table[i].pd_size,
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index c994239..600c50b 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4780,10 +4780,6 @@ pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
 
 	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
 
-#ifdef VERBOSE_INIT_ARM
-	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
-#endif
-
 	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
 
 	PTE_SYNC(&pde[slot]);
@@ -4857,11 +4853,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 	if (l1pt == 0)
 		panic("pmap_map_chunk: no L1 table provided");
 
-#ifdef VERBOSE_INIT_ARM
-	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
-	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
-#endif
-
 	switch (cache) {
 	case PTE_NOCACHE:
 	default:
@@ -4888,9 +4879,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 	while (resid > 0) {
 		/* See if we can use a section mapping. */
 		if (L1_S_MAPPABLE_P(va, pa, resid)) {
-#ifdef VERBOSE_INIT_ARM
-			printf("S");
-#endif
 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
 			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
@@ -4916,9 +4904,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 			    "0x%08x", va);
 		/* See if we can use a L2 large page mapping. */
 		if (L2_L_MAPPABLE_P(va, pa, resid)) {
-#ifdef VERBOSE_INIT_ARM
-			printf("L");
-#endif
 			for (i = 0; i < 16; i++) {
 				pte[l2pte_index(va) + i] =
 				    L2_L_PROTO | pa |
@@ -4932,9 +4917,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 		}
 
 		/* Use a small page mapping. */
-#ifdef VERBOSE_INIT_ARM
-		printf("P");
-#endif
 		pte[l2pte_index(va)] =
 		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
 		PTE_SYNC(&pte[l2pte_index(va)]);
@@ -4942,9 +4924,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 		pa += PAGE_SIZE;
 		resid -= PAGE_SIZE;
 	}
-#ifdef VERBOSE_INIT_ARM
-	printf("\n");
-#endif
 	return (size);
 
 }
@@ -4978,13 +4957,6 @@ pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
 	pmap_devmap_table = table;
 
 	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
-#ifdef VERBOSE_INIT_ARM
-		printf("devmap: %08x -> %08x @ %08x\n",
-		    pmap_devmap_table[i].pd_pa,
-		    pmap_devmap_table[i].pd_pa +
-			pmap_devmap_table[i].pd_size - 1,
-		    pmap_devmap_table[i].pd_va);
-#endif
 		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
 		    pmap_devmap_table[i].pd_pa,
 		    pmap_devmap_table[i].pd_size,
diff --git a/sys/arm/conf/BWCT b/sys/arm/conf/BWCT
index 4c6a1f1..f9145c5 100644
--- a/sys/arm/conf/BWCT
+++ b/sys/arm/conf/BWCT
@@ -19,8 +19,6 @@
 
 ident		BWCT
 
-options 	VERBOSE_INIT_ARM
-
 include		"../at91/std.bwct"
 
 #To statically compile in device wiring instead of /boot/device.hints
diff --git a/sys/arm/conf/CAMBRIA b/sys/arm/conf/CAMBRIA
index 377f45d..51a929b 100644
--- a/sys/arm/conf/CAMBRIA
+++ b/sys/arm/conf/CAMBRIA
@@ -64,7 +64,6 @@ options 	HWPMC_HOOKS
 device		hwpmc
 
 #options 	VERBOSE_SYSINIT
-options 	VERBOSE_INIT_ARM
 
 #device		saarm
 
diff --git a/sys/arm/conf/EP80219 b/sys/arm/conf/EP80219
index a112fe1..cad88f8 100644
--- a/sys/arm/conf/EP80219
+++ b/sys/arm/conf/EP80219
@@ -107,7 +107,6 @@ options 	ARM_USE_SMALL_ALLOC
 
 options 	INCLUDE_CONFIG_FILE     # Include this file in kernel
 #options 	VERBOSE_SYSINIT
-options 	VERBOSE_INIT_ARM
 
 device		bpf
 #options 	ROOTDEVNAME=\"ufs:ada0s1a\"
diff --git a/sys/arm/xscale/i80321/i80321_mcu.c b/sys/arm/xscale/i80321/i80321_mcu.c
index a51a433..e87a78d 100644
--- a/sys/arm/xscale/i80321/i80321_mcu.c
+++ b/sys/arm/xscale/i80321/i80321_mcu.c
@@ -67,11 +67,6 @@ i80321_sdram_bounds(bus_space_tag_t st, bus_space_handle_t sh,
 	sbr0 = bus_space_read_4(st, sh, MCU_SBR0);
 	sbr1 = bus_space_read_4(st, sh, MCU_SBR1);
 
-#ifdef VERBOSE_INIT_ARM
-	printf("i80321: SBDR = 0x%08x SBR0 = 0x%08x SBR1 = 0x%08x\n",
-	    sdbr, sbr0, sbr1);
-#endif
-
 	*start = sdbr;
 
 	sdbr = (sdbr >> 25) & 0x1f;
@@ -82,9 +77,5 @@ i80321_sdram_bounds(bus_space_tag_t st, bus_space_handle_t sh,
 	bank0 = (sbr0 - sdbr) << 25;
 	bank1 = (sbr1 - sbr0) << 25;
 
-#ifdef VERBOSE_INIT_ARM
-	printf("i80321: BANK0 = 0x%08x BANK1 = 0x%08x\n", bank0, bank1);
-#endif
-
 	*size = bank0 + bank1;
 }
diff --git a/sys/arm/xscale/i80321/iq80321.c b/sys/arm/xscale/i80321/iq80321.c
index f43f8e4..49e6313 100644
--- a/sys/arm/xscale/i80321/iq80321.c
+++ b/sys/arm/xscale/i80321/iq80321.c
@@ -63,6 +63,13 @@ __FBSDID("$FreeBSD$");
 
 #include <dev/pci/pcireg.h>
 
+#undef DEBUG
+
+#ifdef DEBUG
+#define	DPRINTF(fmt, arg...)	printf(fmt, ##arg)
+#else
+#define	DPRINTF(fmt, arg...)
+#endif
 
 int	iq80321_probe(device_t);
 void	iq80321_identify(driver_t *, device_t);
@@ -158,10 +165,8 @@ iq80321_attach(device_t dev)
 	b1l = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, PCIR_BARS+0x8);
 	b1u = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, PCIR_BARS+0xc);
 
-#ifdef VERBOSE_INIT_ARM	
-	printf("i80321: BAR0 = %08x.%08x BAR1 = %08x.%08x\n",
+	DPRINTF("i80321: BAR0 = %08x.%08x BAR1 = %08x.%08x\n",
 		   b0l,b0u, b1l, b1u );
-#endif
 
 #define PCI_MAPREG_MEM_ADDR_MASK	0xfffffff0
 	b0l &= PCI_MAPREG_MEM_ADDR_MASK;
@@ -169,10 +174,8 @@ iq80321_attach(device_t dev)
 	b1l &= PCI_MAPREG_MEM_ADDR_MASK;
 	b1u &= PCI_MAPREG_MEM_ADDR_MASK;
 
-#ifdef VERBOSE_INIT_ARM	
-	printf("i80219: BAR0 = %08x.%08x BAR1 = %08x.%08x\n",
+	DPRINTF("i80219: BAR0 = %08x.%08x BAR1 = %08x.%08x\n",
 		   b0l,b0u, b1l, b1u );
-#endif
 
 	if ((b0u != b1u) || (b0l != 0) || ((b1l & ~0x80000000U) != 0))
 		sc->sc_is_host = 0;
@@ -243,20 +246,18 @@ iq80321_attach(device_t dev)
 	sc->sc_iwin[3].iwin_xlate = 0;
 	sc->sc_iwin[3].iwin_size = 0;
 	
-#ifdef 	VERBOSE_INIT_ARM
-	printf("i80321: Reserve space for private devices (Inbound Window 1) \n hi:0x%08x lo:0x%08x xlate:0x%08x size:0x%08x\n",
+	DPRINTF("i80321: Reserve space for private devices (Inbound Window 1) \n hi:0x%08x lo:0x%08x xlate:0x%08x size:0x%08x\n",
 		   sc->sc_iwin[1].iwin_base_hi,
 		   sc->sc_iwin[1].iwin_base_lo,
 		   sc->sc_iwin[1].iwin_xlate,
 		   sc->sc_iwin[1].iwin_size
 		);
-	printf("i80321: RAM access (Inbound Window 2) \n hi:0x%08x lo:0x%08x xlate:0x%08x size:0x%08x\n",
+	DPRINTF("i80321: RAM access (Inbound Window 2) \n hi:0x%08x lo:0x%08x xlate:0x%08x size:0x%08x\n",
 		   sc->sc_iwin[2].iwin_base_hi,
 		   sc->sc_iwin[2].iwin_base_lo,
 		   sc->sc_iwin[2].iwin_xlate,
 		   sc->sc_iwin[2].iwin_size
 		);
-#endif
 
 	/*
 	 * We set up the Outbound Windows as follows:
diff --git a/sys/conf/options.arm b/sys/conf/options.arm
index 70dccf8..93e263d 100644
--- a/sys/conf/options.arm
+++ b/sys/conf/options.arm
@@ -52,7 +52,6 @@ SOC_TEGRA2		opt_global.h
 STARTUP_PAGETABLE_ADDR	opt_global.h
 XSCALE_CACHE_READ_WRITE_ALLOCATE	opt_global.h
 XSACLE_DISABLE_CCNT	opt_timer.h
-VERBOSE_INIT_ARM	opt_global.h
 VM_MAXUSER_ADDRESS	opt_global.h
 AT91_ATE_USE_RMII	opt_at91.h
 AT91_MCI_HAS_4WIRE	opt_at91.h
-- 
1.8.2


--------------070804070705080607020805
Content-Type: text/x-patch;
 name="0005-arm-Rework-and-organize-pmap_enter_locked-function.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
 filename*0="0005-arm-Rework-and-organize-pmap_enter_locked-function.patc";
 filename*1="h"

>From 6a7d0a820aa637136aea84d62d11f782bd1640ea Mon Sep 17 00:00:00 2001
From: Zbigniew Bodek <zbb@semihalf.com>
Date: Mon, 29 Apr 2013 18:58:57 +0200
Subject: [PATCH 5/5] arm: Rework and organize pmap_enter_locked() function.

The pmap_enter_locked() implementation was ambiguous and confusing.
Rearrange it so that each part of the mapping creation is handled separately,
avoid walking through redundant conditions, and extract the vector_page
specific PTE setup from the normal PTE setup.
---
 sys/arm/arm/pmap-v6.c | 158 +++++++++++++++++++++++---------------------------
 1 file changed, 72 insertions(+), 86 deletions(-)

diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index e937152..c3237e1 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -2723,38 +2723,54 @@ do_l2b_alloc:
 	is_exec = is_refd = 0;
 
 	if (opte) {
-		/*
-		 * There is already a mapping at this address.
-		 * If the physical address is different, lookup the
-		 * vm_page.
-		 */
-		if (l2pte_pa(opte) != pa)
-			om = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-		else
-			om = m;
-	} else
-		om = NULL;
-
-	if ((prot & (VM_PROT_ALL)) || !m) {
-		/*
-		 * - The access type indicates that we don't need
-		 *   to do referenced emulation.
-		 * OR
-		 * - The physical page has already been referenced
-		 *   so no need to re-do referenced emulation here.
-		 */
-		npte |= L2_S_REF;
+		if (l2pte_pa(opte) == pa) {
+			/*
+			 * We're changing the attrs of an existing mapping.
+			 */
+			if (m != NULL)
+				pmap_modify_pv(m, pmap, va,
+				    PVF_WRITE | PVF_WIRED, nflags);
+			is_exec |= PTE_BEEN_EXECD(opte);
+			is_refd |= PTE_BEEN_REFD(opte);
+			goto validate;
+		}
+		if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
+			/*
+			 * Replacing an existing mapping with a new one.
+			 * It is part of our managed memory so we
+			 * must remove it from the PV list
+			 */
+			if ((pve = pmap_remove_pv(om, pmap, va))) {
+				is_exec |= PTE_BEEN_EXECD(opte);
+				is_refd |= PTE_BEEN_REFD(opte);
+		
+				if (m && ((m->oflags & VPO_UNMANAGED)))
+					pmap_free_pv_entry(pmap, pve);
+			}
+		}
 
-		if (m != NULL &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
-		 * Need to do page referenced emulation.
+		 * Keep the stats up to date
 		 */
-		npte &= ~L2_S_REF;
+		l2b->l2b_occupancy++;
+		pmap->pm_stats.resident_count++;
 	}
 
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m && !(m->oflags & VPO_UNMANAGED))) {
+		if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
+			panic("pmap_enter: no pv entries");
+
+		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		("pmap_enter: managed mapping within the clean submap"));
+		KASSERT(pve != NULL, ("No pv"));
+		pmap_enter_pv(m, pve, pmap, va, nflags);
+	}
+
+validate:
 	/* Make the new PTE valid */
 	npte |= L2_S_PROTO;
 #ifdef SMP
@@ -2763,78 +2779,48 @@ do_l2b_alloc:
 	/* Set defaults first - kernel read access */
 	npte |= L2_APX;
 	npte |= L2_S_PROT_R;
+	/* Set "referenced" flag */
+	npte |= L2_S_REF;
 
 	/* Now tune APs as desired */
 	if (user)
 		npte |= L2_S_PROT_U;
-
-	if (prot & VM_PROT_WRITE) {
-		npte &= ~(L2_APX);
-
-		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0) {
-			vm_page_aflag_set(m, PGA_WRITEABLE);
+	/*
+	 * If this is not a vector_page
+	 * then continue setting mapping parameters
+	 */
+	if (m != NULL) {
+		if (prot & (VM_PROT_ALL)) {
+			if ((m->oflags & VPO_UNMANAGED) == 0)
+				vm_page_aflag_set(m, PGA_REFERENCED);
+		} else {
 			/*
-			 * The access type and permissions indicate 
-			 * that the page will be written as soon as returned
-			 * from fault service.
-			 * Mark it dirty from the outset.
+			 * Need to do page referenced emulation.
 			 */
-			if ((access & VM_PROT_WRITE) != 0)
-				vm_page_dirty(m);
+			npte &= ~L2_S_REF;
 		}
-	}
-
-	if (!(prot & VM_PROT_EXECUTE) && m)
-		npte |= L2_XN;
 
-	if (m && (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE))
-		npte |= pte_l2_s_cache_mode;
-
-	if (m && m == om) {
-		/*
-		 * We're changing the attrs of an existing mapping.
-		 */
-		pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_WIRED, nflags);
-		is_exec |= PTE_BEEN_EXECD(opte);
-		is_refd |= PTE_BEEN_REFD(opte);
-	} else {
-		/*
-		 * New mapping, or changing the backing page
-		 * of an existing mapping.
-		 */
-		if (om) {
-			/*
-			 * Replacing an existing mapping with a new one.
-			 * It is part of our managed memory so we
-			 * must remove it from the PV list
-			 */
-			if ((pve = pmap_remove_pv(om, pmap, va))) {
-				is_exec |= PTE_BEEN_EXECD(opte);
-				is_refd |= PTE_BEEN_REFD(opte);
+		if (prot & VM_PROT_WRITE) {
+			/* Write enable */
+			npte &= ~(L2_APX);
 
-			    if (m && ((m->oflags & VPO_UNMANAGED)))
-				pmap_free_pv_entry(pmap, pve);
+			if ((m->oflags & VPO_UNMANAGED) == 0) {
+				vm_page_aflag_set(m, PGA_WRITEABLE);
+				/*
+				 * The access type and permissions indicate 
+				 * that the page will be written as soon as
+				 * returned from fault service.
+				 * Mark it dirty from the outset.
+				 */
+				if ((access & VM_PROT_WRITE) != 0)
+					vm_page_dirty(m);
 			}
 		}
+		if (!(prot & VM_PROT_EXECUTE))
+			npte |= L2_XN;
 
-		if ((m && !(m->oflags & VPO_UNMANAGED))) {
-			if ((!pve) &&
-			    (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
-				panic("pmap_enter: no pv entries");
-
-			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-			("pmap_enter: managed mapping within the clean submap"));
-			KASSERT(pve != NULL, ("No pv"));
-			pmap_enter_pv(m, pve, pmap, va, nflags);
-		}
-	}
-
-	/*
-	 * Keep the stats up to date
-	 */
-	if (opte == 0) {
-		l2b->l2b_occupancy++;
-		pmap->pm_stats.resident_count++;
+		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			npte |= pte_l2_s_cache_mode;
 	}
 
 	CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",
-- 
1.8.2


--------------070804070705080607020805--


