Date:      Thu, 5 Apr 2012 00:53:22 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject:   svn commit: r233911 - stable/9/sys/powerpc/aim
Message-ID:  <201204050053.q350rMVK033015@svn.freebsd.org>

Author: nwhitehorn
Date: Thu Apr  5 00:53:21 2012
New Revision: 233911
URL: http://svn.freebsd.org/changeset/base/233911

Log:
  MFC 232980,233011,233017,233117,233434,233436,233454,233529,233530,233618:
  
  Major pmap performance, concurrency, and correctness improvements, mostly
  for the 64-bit PMAP module (64-bit-capable CPUs with either a 32-bit or
  64-bit kernel). Thanks to alc for his help and prodding.
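  
  The centerpiece of the merge is replacing the global pteg/pvo table
  mutex with a reader/writer lock, so lookups and in-place PTE updates
  can run concurrently while insertion and removal serialize. A minimal
  sketch of the rwlock(9) discipline the patch adopts, using
  hypothetical example_* names rather than the kernel's own:
  
      #include <sys/param.h>
      #include <sys/lock.h>
      #include <sys/rwlock.h>
  
      static struct rwlock example_table_lock;
  
      static void
      example_init(void)
      {
              /* RW_RECURSE mirrors MTX_RECURSE on the mutex it replaces. */
              rw_init_flags(&example_table_lock, "example tables", RW_RECURSE);
      }
  
      static void
      example_lookup(void)
      {
              rw_rlock(&example_table_lock);  /* shared: reads, in-place updates */
              /* ... consult PVO/PTE state ... */
              rw_runlock(&example_table_lock);
      }
  
      static void
      example_insert(void)
      {
              rw_wlock(&example_table_lock);  /* exclusive: insert/remove PVOs */
              /* ... link or unlink entries ... */
              rw_wunlock(&example_table_lock);
      }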

Modified:
  stable/9/sys/powerpc/aim/mmu_oea.c
  stable/9/sys/powerpc/aim/mmu_oea64.c
  stable/9/sys/powerpc/aim/moea64_native.c
Directory Properties:
  stable/9/sys/   (props changed)

Modified: stable/9/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea.c	Thu Apr  5 00:08:16 2012	(r233910)
+++ stable/9/sys/powerpc/aim/mmu_oea.c	Thu Apr  5 00:53:21 2012	(r233911)
@@ -1760,7 +1760,7 @@ moea_release(mmu_t mmu, pmap_t pmap)
 void
 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
-	struct	pvo_entry *pvo;
+	struct	pvo_entry *pvo, *tpvo;
 	int	pteidx;
 
 	vm_page_lock_queues();
@@ -1772,7 +1772,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_off
 				moea_pvo_remove(pvo, pteidx);
 		}
 	} else {
-		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
+		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
 			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
 				continue;
 			moea_pvo_remove(pvo, -1);
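
  The mmu_oea.c change is a plain iterator-invalidation fix:
  moea_pvo_remove() unlinks the element the loop cursor points at, so
  the traversal must use the _SAFE variant from <sys/queue.h>, which
  loads the successor before the loop body runs. A self-contained
  sketch of the idiom (entry type, list, and malloc type are
  illustrative only):

      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/malloc.h>
      #include <sys/queue.h>

      struct example_ent {
              LIST_ENTRY(example_ent) link;
      };
      static LIST_HEAD(, example_ent) example_head =
          LIST_HEAD_INITIALIZER(example_head);

      static void
      example_purge(void)
      {
              struct example_ent *e, *tmp;

              /* 'tmp' is saved before the body may unlink and free 'e'. */
              LIST_FOREACH_SAFE(e, &example_head, link, tmp) {
                      LIST_REMOVE(e, link);
                      free(e, M_TEMP);
              }
      }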

Modified: stable/9/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea64.c	Thu Apr  5 00:08:16 2012	(r233910)
+++ stable/9/sys/powerpc/aim/mmu_oea64.c	Thu Apr  5 00:53:21 2012	(r233911)
@@ -125,6 +125,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -172,9 +173,20 @@ uintptr_t moea64_get_unique_vsid(void); 
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 #define	VSID_HASH_MASK		0x0000007fffffffffULL
 
-#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
-#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
-#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
+/*
+ * Locking semantics:
+ * -- Read lock: if no modifications are being made to either the PVO lists
+ *    or page table or if any modifications being made result in internal
+ *    changes (e.g. wiring, protection) such that the existence of the PVOs
+ *    is unchanged and they remain associated with the same pmap (in which
+ *    case the changes should be protected by the pmap lock)
+ * -- Write lock: required if PTEs/PVOs are being inserted or removed.
+ */
+
+#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
+#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
+#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
+#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
 
 struct ofw_map {
 	cell_t	om_va;
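
  Under the semantics spelled out above, an in-place attribute change
  (the moea64_pvo_protect() path later in this diff, for example) runs
  with the table lock held shared plus the owning pmap's lock for
  exclusion, while anything that creates or destroys PVOs takes the
  table lock exclusively. A sketch of the read-side pairing, assuming
  the macros just defined:

      static void
      example_change_prot(pmap_t pm)
      {
              LOCK_TABLE_RD();        /* PVOs neither created nor destroyed */
              PMAP_LOCK(pm);          /* serializes in-place changes per pmap */
              /* ... rewrite protection bits of existing entries ... */
              PMAP_UNLOCK(pm);
              UNLOCK_TABLE_RD();
      }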
@@ -197,7 +209,7 @@ extern void bs_remap_earlyboot(void);
 /*
  * Lock for the pteg and pvo tables.
  */
-struct mtx	moea64_table_mutex;
+struct rwlock	moea64_table_lock;
 struct mtx	moea64_slb_mutex;
 
 /*
@@ -307,6 +319,7 @@ void moea64_qenter(mmu_t, vm_offset_t, v
 void moea64_qremove(mmu_t, vm_offset_t, int);
 void moea64_release(mmu_t, pmap_t);
 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+void moea64_remove_pages(mmu_t, pmap_t);
 void moea64_remove_all(mmu_t, vm_page_t);
 void moea64_remove_write(mmu_t, vm_page_t);
 void moea64_zero_page(mmu_t, vm_page_t);
@@ -349,6 +362,7 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_qremove,		moea64_qremove),
 	MMUMETHOD(mmu_release,		moea64_release),
 	MMUMETHOD(mmu_remove,		moea64_remove),
+	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
 	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
 	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
@@ -393,35 +407,10 @@ vm_page_to_pvoh(vm_page_t m)
 }
 
 static __inline void
-moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
-{
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	m->md.mdpg_attrs &= ~ptebit;
-}
-
-static __inline u_int64_t
-moea64_attr_fetch(vm_page_t m)
-{
-
-	return (m->md.mdpg_attrs);
-}
-
-static __inline void
-moea64_attr_save(vm_page_t m, u_int64_t ptebit)
-{
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	m->md.mdpg_attrs |= ptebit;
-}
-
-static __inline void
 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 
     uint64_t pte_lo, int flags)
 {
 
-	ASSERT_TABLE_LOCK();
-
 	/*
 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
 	 * set when the real pte is set in memory.
@@ -614,6 +603,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 
 	DISABLE_TRANS(msr);
 	if (hw_direct_map) {
+		LOCK_TABLE_WR();
 		PMAP_LOCK(kernel_pmap);
 		for (i = 0; i < pregions_sz; i++) {
 		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
@@ -638,6 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 		  }
 		}
 		PMAP_UNLOCK(kernel_pmap);
+		UNLOCK_TABLE_WR();
 	} else {
 		size = sizeof(struct pvo_head) * moea64_pteg_count;
 		off = (vm_offset_t)(moea64_pvo_table);
@@ -796,8 +787,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offs
 	 * Initialize the lock that synchronizes access to the pteg and pvo
 	 * tables.
 	 */
-	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
-	    MTX_RECURSE);
+	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
 	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
 
 	/*
@@ -976,7 +966,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
 
 			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
 			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
-			LOCK_TABLE();
+			LOCK_TABLE_RD();
 			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
 			    mmup, moea64_scratchpage_pvo[i]);
 			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
@@ -984,7 +974,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
 			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
 			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
 			    moea64_scratchpage_pvo[i]->pvo_vpn);
-			UNLOCK_TABLE();
+			UNLOCK_TABLE_RD();
 		}
 	}
 }
@@ -1030,11 +1020,11 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
 	uint64_t vsid;
 	int	i, ptegidx;
 
+	LOCK_TABLE_WR();
 	PMAP_LOCK(pm);
 	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
 
 	if (pvo != NULL) {
-		LOCK_TABLE();
 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 
 		if (wired) {
@@ -1070,8 +1060,8 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
 			}
 		}
 			
-		UNLOCK_TABLE();
 	}
+	UNLOCK_TABLE_WR();
 	PMAP_UNLOCK(pm);
 }
 
@@ -1182,10 +1172,10 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	LOCK_TABLE_WR();
 	PMAP_LOCK(pmap);
 	moea64_enter_locked(mmu, pmap, va, m, prot, wired);
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_WR();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1194,7 +1184,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  *
- * The page queues and pmap must be locked.
+ * The table (write) and pmap must be locked.
  */
 
 static void
@@ -1220,8 +1210,6 @@ moea64_enter_locked(mmu_t mmu, pmap_t pm
 		pvo_flags = PVO_MANAGED;
 	}
 
-	if (pmap_bootstrapped)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1319,14 +1307,14 @@ moea64_enter_object(mmu_t mmu, pmap_t pm
 
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	LOCK_TABLE_WR();
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_WR();
 	PMAP_UNLOCK(pm);
 }
 
@@ -1335,11 +1323,11 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm,
     vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	LOCK_TABLE_WR();
 	PMAP_LOCK(pm);
 	moea64_enter_locked(mmu, pm, va, m,
 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_WR();
 	PMAP_UNLOCK(pm);
 }
 
@@ -1349,6 +1337,7 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
 
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
 	pvo = moea64_pvo_find_va(pm, va);
 	if (pvo == NULL)
@@ -1356,6 +1345,7 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
 	else
 		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
 		    (va - PVO_VADDR(pvo));
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 	return (pa);
 }
@@ -1374,6 +1364,7 @@ moea64_extract_and_hold(mmu_t mmu, pmap_
         
 	m = NULL;
 	pa = 0;
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pmap);
 retry:
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
@@ -1387,6 +1378,7 @@ retry:
 		vm_page_hold(m);
 	}
 	PA_UNLOCK_COND(pa);
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
@@ -1411,9 +1403,6 @@ moea64_uma_page_alloc(uma_zone_t zone, i
 	*flags = UMA_SLAB_PRIV;
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
 
-	if (needed_lock)
-		PMAP_LOCK(kernel_pmap);
-
         if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
                 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
         else
@@ -1433,12 +1422,17 @@ moea64_uma_page_alloc(uma_zone_t zone, i
 
 	va = VM_PAGE_TO_PHYS(m);
 
+	LOCK_TABLE_WR();
+	if (needed_lock)
+		PMAP_LOCK(kernel_pmap);
+
 	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
 	    PVO_WIRED | PVO_BOOTSTRAP);
 
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
+	UNLOCK_TABLE_WR();
 	
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
                 bzero((void *)va, PAGE_SIZE);
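
  The reshuffle above also keeps page allocation outside the exclusive
  section: the page is obtained first, and only then are the table
  write lock and (if needed) the pmap lock taken, so the allocator is
  never entered with either lock held. The resulting shape, as a
  hedged sketch with a hypothetical caller-supplied page:

      static int
      example_wire_page(pmap_t pm, vm_page_t m)
      {
              int needed_lock;

              if (m == NULL)                  /* allocated before locking */
                      return (ENOMEM);

              needed_lock = !PMAP_LOCKED(pm);
              LOCK_TABLE_WR();                /* table lock before pmap lock */
              if (needed_lock)
                      PMAP_LOCK(pm);
              /* ... enter a wired mapping for VM_PAGE_TO_PHYS(m) ... */
              if (needed_lock)
                      PMAP_UNLOCK(pm);
              UNLOCK_TABLE_WR();
              return (0);
      }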
@@ -1502,10 +1496,12 @@ moea64_is_prefaultable(mmu_t mmu, pmap_t
 	struct pvo_entry *pvo;
 	boolean_t rv;
 
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
 	PMAP_UNLOCK(pmap);
+	UNLOCK_TABLE_RD();
 	return (rv);
 }
 
@@ -1547,7 +1543,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 	struct	pvo_entry *pvo;
 	uintptr_t pt;
 	pmap_t	pmap;
-	uint64_t lo;
+	uint64_t lo = 0;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_remove_write: page %p is not managed", m));
@@ -1561,13 +1557,11 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
-	lo = moea64_attr_fetch(m);
 	powerpc_sync();
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
-		LOCK_TABLE();
 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
 			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
@@ -1582,15 +1576,12 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 					isync();
 			}
 		}
-		UNLOCK_TABLE();
+		if ((lo & LPTE_CHG) != 0) 
+			vm_page_dirty(m);
 		PMAP_UNLOCK(pmap);
 	}
-	if ((lo & LPTE_CHG) != 0) {
-		moea64_attr_clear(m, LPTE_CHG);
-		vm_page_dirty(m);
-	}
+	UNLOCK_TABLE_RD();
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 /*
@@ -1631,13 +1622,12 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 		return;
 	}
 
-	vm_page_lock_queues();
 	pvo_head = vm_page_to_pvoh(m);
 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
-		LOCK_TABLE();
 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
 		pvo->pvo_pte.lpte.pte_lo |= lo;
@@ -1647,11 +1637,10 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 			if (pvo->pvo_pmap == kernel_pmap)
 				isync();
 		}
-		UNLOCK_TABLE();
 		PMAP_UNLOCK(pmap);
 	}
+	UNLOCK_TABLE_RD();
 	m->md.mdpg_cache_attrs = ma;
-	vm_page_unlock_queues();
 }
 
 /*
@@ -1665,9 +1654,12 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
 
 	pte_lo = moea64_calc_wimg(pa, ma);
 
+	LOCK_TABLE_WR();
 	PMAP_LOCK(kernel_pmap);
 	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
+	PMAP_UNLOCK(kernel_pmap);
+	UNLOCK_TABLE_WR();
 
 	if (error != 0 && error != ENOENT)
 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
@@ -1678,7 +1670,6 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
 	 */
 	if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
 		__syncicache((void *)va, PAGE_SIZE);
-	PMAP_UNLOCK(kernel_pmap);
 }
 
 void
@@ -1705,11 +1696,13 @@ moea64_kextract(mmu_t mmu, vm_offset_t v
 	if (va < VM_MIN_KERNEL_ADDRESS)
 		return (va);
 
+	LOCK_TABLE_RD();
 	PMAP_LOCK(kernel_pmap);
 	pvo = moea64_pvo_find_va(kernel_pmap, va);
 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
 	    va));
 	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(kernel_pmap);
 	return (pa);
 }
@@ -1765,7 +1758,7 @@ moea64_page_exists_quick(mmu_t mmu, pmap
 	    ("moea64_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap) {
 			rv = TRUE;
@@ -1774,7 +1767,7 @@ moea64_page_exists_quick(mmu_t mmu, pmap
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_RD();
 	return (rv);
 }
 
@@ -1791,11 +1784,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_RD();
 	return (count);
 }
 
@@ -1903,16 +1896,73 @@ moea64_pinit0(mmu_t mmu, pmap_t pm)
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
+static void
+moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
+{
+	uintptr_t pt;
+	uint64_t oldlo;
+
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+
+	/*
+	 * Grab the PTE pointer before we diddle with the cached PTE
+	 * copy.
+	 */
+	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+
+	/*
+	 * Change the protection of the page.
+	 */
+	oldlo = pvo->pvo_pte.lpte.pte_lo;
+	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
+	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
+	if ((prot & VM_PROT_EXECUTE) == 0) 
+		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
+	if (prot & VM_PROT_WRITE) 
+		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
+	else
+		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
+
+	/*
+	 * If the PVO is in the page table, update that pte as well.
+	 */
+	if (pt != -1) {
+		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+		    pvo->pvo_vpn);
+		if ((pvo->pvo_pte.lpte.pte_lo & 
+		    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+			moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
+			    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+			    PAGE_SIZE);
+		}
+	}
+
+	/*
+	 * Update vm about the REF/CHG bits if the page is managed and we have
+	 * removed write access.
+	 */
+	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 
+	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
+		struct	vm_page *pg;
+
+		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+		if (pg != NULL) {
+			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
+				vm_page_dirty(pg);
+			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
+				vm_page_aflag_set(pg, PGA_REFERENCED);
+		}
+	}
+}
+
 void
 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
-	struct	pvo_entry *pvo;
-	uintptr_t pt;
-
-	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
-	    eva, prot);
+	struct	pvo_entry *pvo, *tpvo;
 
+	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
+	    sva, eva, prot);
 
 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
 	    ("moea64_protect: non current pmap"));
@@ -1922,45 +1972,30 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 		return;
 	}
 
-	vm_page_lock_queues();
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
-	for (; sva < eva; sva += PAGE_SIZE) {
-		pvo = moea64_pvo_find_va(pm, sva);
-		if (pvo == NULL)
-			continue;
-
-		/*
-		 * Grab the PTE pointer before we diddle with the cached PTE
-		 * copy.
-		 */
-		LOCK_TABLE();
-		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
-		/*
-		 * Change the protection of the page.
-		 */
-		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
-		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
-		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
-		if ((prot & VM_PROT_EXECUTE) == 0) 
-			pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
-
-		/*
-		 * If the PVO is in the page table, update that pte as well.
-		 */
-		if (pt != -1) {
-			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-			    pvo->pvo_vpn);
-			if ((pvo->pvo_pte.lpte.pte_lo & 
-			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-				moea64_syncicache(mmu, pm, sva,
-				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
-				    PAGE_SIZE);
+	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
+		while (sva < eva) {
+			#ifdef __powerpc64__
+			if (pm != kernel_pmap &&
+			    user_va_to_slb_entry(pm, sva) == NULL) {
+				sva = roundup2(sva + 1, SEGMENT_LENGTH);
+				continue;
 			}
+			#endif
+			pvo = moea64_pvo_find_va(pm, sva);
+			if (pvo != NULL)
+				moea64_pvo_protect(mmu, pm, pvo, prot);
+			sva += PAGE_SIZE;
+		}
+	} else {
+		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
+			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
+				continue;
+			moea64_pvo_protect(mmu, pm, pvo, prot);
 		}
-		UNLOCK_TABLE();
 	}
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 }
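
  The new loop structure is a cost model: when the VA range spans fewer
  pages than the pmap has resident, probe page by page; otherwise walk
  the pmap's PVO list once and filter by range. As a rough worked
  example, protecting a 1 GB range (262,144 4 KB pages) in a pmap with
  100 resident pages previously cost ~262,144 hash lookups and now
  costs a single 100-entry list walk. moea64_remove() below adopts the
  same strategy. On powerpc64 the per-page walk additionally skips
  whole unmapped segments:

      /*
       * Assuming the usual SEGMENT_LENGTH of 0x10000000 (256 MB): when
       * user_va_to_slb_entry() reports no SLB entry covering sva,
       *
       *      sva = roundup2(sva + 1, SEGMENT_LENGTH);
       *
       * jumps straight to the next segment boundary (e.g. 0x10000000 ->
       * 0x20000000) instead of probing 65,536 page addresses.
       */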
 
@@ -2028,29 +2063,61 @@ moea64_release(mmu_t mmu, pmap_t pmap)
 }
 
 /*
+ * Remove all pages mapped by the specified pmap
+ */
+void
+moea64_remove_pages(mmu_t mmu, pmap_t pm)
+{
+	struct	pvo_entry *pvo, *tpvo;
+
+	LOCK_TABLE_WR();
+	PMAP_LOCK(pm);
+	LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
+		if (!(pvo->pvo_vaddr & PVO_WIRED))
+			moea64_pvo_remove(mmu, pvo);
+	}
+	UNLOCK_TABLE_WR();
+	PMAP_UNLOCK(pm);
+}
+
+/*
  * Remove the given range of addresses from the specified map.
  */
 void
 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
-	struct	pvo_entry *pvo;
+	struct	pvo_entry *pvo, *tpvo;
+
+	/*
+	 * Perform an unsynchronized read.  This is, however, safe.
+	 */
+	if (pm->pm_stats.resident_count == 0)
+		return;
 
-	vm_page_lock_queues();
+	LOCK_TABLE_WR();
 	PMAP_LOCK(pm);
-	if ((eva - sva)/PAGE_SIZE < 10) {
-		for (; sva < eva; sva += PAGE_SIZE) {
+	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
+		while (sva < eva) {
+			#ifdef __powerpc64__
+			if (pm != kernel_pmap &&
+			    user_va_to_slb_entry(pm, sva) == NULL) {
+				sva = roundup2(sva + 1, SEGMENT_LENGTH);
+				continue;
+			}
+			#endif
 			pvo = moea64_pvo_find_va(pm, sva);
 			if (pvo != NULL)
 				moea64_pvo_remove(mmu, pvo);
+			sva += PAGE_SIZE;
 		}
 	} else {
-		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
+		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
 			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
 				continue;
 			moea64_pvo_remove(mmu, pvo);
 		}
 	}
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_WR();
 	PMAP_UNLOCK(pm);
 }
 
@@ -2065,8 +2132,8 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 	struct	pvo_entry *pvo, *next_pvo;
 	pmap_t	pmap;
 
-	vm_page_lock_queues();
 	pvo_head = vm_page_to_pvoh(m);
+	LOCK_TABLE_WR();
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
 
@@ -2075,12 +2142,10 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 		moea64_pvo_remove(mmu, pvo);
 		PMAP_UNLOCK(pmap);
 	}
-	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
-		moea64_attr_clear(m, LPTE_CHG);
+	UNLOCK_TABLE_WR();
+	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
 		vm_page_dirty(m);
-	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 /*
@@ -2156,6 +2221,9 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 	if (!moea64_initialized)
 		bootstrap = 1;
 
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+	rw_assert(&moea64_table_lock, RA_WLOCKED);
+
 	/*
 	 * Compute the PTE Group index.
 	 */
@@ -2167,8 +2235,6 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
 	 * there is a mapping.
 	 */
-	LOCK_TABLE();
-
 	moea64_pvo_enter_calls++;
 
 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
@@ -2184,7 +2250,6 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 						PVO_PTEGIDX_SET(pvo, i);
 					moea64_pte_overflow--;
 				}
-				UNLOCK_TABLE();
 				return (0);
 			}
 			moea64_pvo_remove(mmu, pvo);
@@ -2211,15 +2276,11 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 		 * table. The mapping we are working with is already
 		 * protected by the PMAP lock.
 		 */
-		UNLOCK_TABLE();
 		pvo = uma_zalloc(zone, M_NOWAIT);
-		LOCK_TABLE();
 	}
 
-	if (pvo == NULL) {
-		UNLOCK_TABLE();
+	if (pvo == NULL)
 		return (ENOMEM);
-	}
 
 	moea64_pvo_entries++;
 	pvo->pvo_vaddr = va;
@@ -2274,8 +2335,6 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 	if (pm == kernel_pmap)
 		isync();
 
-	UNLOCK_TABLE();
-
 #ifdef __powerpc64__
 	/*
 	 * Make sure all our bootstrap mappings are in the SLB as soon
@@ -2293,11 +2352,13 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
 {
 	uintptr_t pt;
 
+	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
+	rw_assert(&moea64_table_lock, RA_WLOCKED);
+
 	/*
 	 * If there is an active pte entry, we need to deactivate it (and
 	 * save the ref & cfg bits).
 	 */
-	LOCK_TABLE();
 	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 	if (pt != -1) {
 		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
@@ -2314,19 +2375,6 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
 		pvo->pvo_pmap->pm_stats.wired_count--;
 
 	/*
-	 * Save the REF/CHG bits into their cache if the page is managed.
-	 */
-	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
-		struct	vm_page *pg;
-
-		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
-		if (pg != NULL) {
-			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
-			    (LPTE_REF | LPTE_CHG));
-		}
-	}
-
-	/*
 	 * Remove this PVO from the PV and pmap lists.
 	 */
 	LIST_REMOVE(pvo, pvo_vlink);
@@ -2338,11 +2386,27 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
 	 */
 	LIST_REMOVE(pvo, pvo_olink);
 
+	/*
+	 * Update vm about the REF/CHG bits if the page is managed.
+	 */
+	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
+	    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
+		struct	vm_page *pg;
+
+		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+		if (pg != NULL) {
+			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
+				vm_page_dirty(pg);
+			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
+				vm_page_aflag_set(pg, PGA_REFERENCED);
+			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
+				vm_page_aflag_clear(pg, PGA_WRITEABLE);
+		}
+	}
+
 	moea64_pvo_entries--;
 	moea64_pvo_remove_calls++;
 
-	UNLOCK_TABLE();
-
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
 		    moea64_upvo_zone, pvo);
@@ -2380,12 +2444,10 @@ moea64_pvo_find_va(pmap_t pm, vm_offset_
 	ptegidx = va_to_pteg(vsid, va, 0);
 	#endif
 
-	LOCK_TABLE();
 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
 			break;
 	}
-	UNLOCK_TABLE();
 
 	return (pvo);
 }
@@ -2396,20 +2458,13 @@ moea64_query_bit(mmu_t mmu, vm_page_t m,
 	struct	pvo_entry *pvo;
 	uintptr_t pt;
 
-	if (moea64_attr_fetch(m) & ptebit)
-		return (TRUE);
-
-	vm_page_lock_queues();
-
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-
 		/*
-		 * See if we saved the bit off.  If so, cache it and return
-		 * success.
+		 * See if we saved the bit off.  If so, return success.
 		 */
 		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
-			moea64_attr_save(m, ptebit);
-			vm_page_unlock_queues();
+			UNLOCK_TABLE_RD();
 			return (TRUE);
 		}
 	}
@@ -2425,24 +2480,22 @@ moea64_query_bit(mmu_t mmu, vm_page_t m,
 		/*
 		 * See if this pvo has a valid PTE.  if so, fetch the
 		 * REF/CHG bits from the valid PTE.  If the appropriate
-		 * ptebit is set, cache it and return success.
+		 * ptebit is set, return success.
 		 */
-		LOCK_TABLE();
+		PMAP_LOCK(pvo->pvo_pmap);
 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 		if (pt != -1) {
 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
 			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
-				UNLOCK_TABLE();
-
-				moea64_attr_save(m, ptebit);
-				vm_page_unlock_queues();
+				PMAP_UNLOCK(pvo->pvo_pmap);
+				UNLOCK_TABLE_RD();
 				return (TRUE);
 			}
 		}
-		UNLOCK_TABLE();
+		PMAP_UNLOCK(pvo->pvo_pmap);
 	}
 
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_RD();
 	return (FALSE);
 }
 
@@ -2453,13 +2506,6 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m,
 	struct	pvo_entry *pvo;
 	uintptr_t pt;
 
-	vm_page_lock_queues();
-
-	/*
-	 * Clear the cached value.
-	 */
-	moea64_attr_clear(m, ptebit);
-
 	/*
 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
 	 * we can reset the right ones).  note that since the pvo entries and
@@ -2474,9 +2520,9 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m,
 	 * valid pte clear the ptebit from the valid pte.
 	 */
 	count = 0;
+	LOCK_TABLE_RD();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-
-		LOCK_TABLE();
+		PMAP_LOCK(pvo->pvo_pmap);
 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 		if (pt != -1) {
 			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
@@ -2487,10 +2533,10 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m,
 			}
 		}
 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
-		UNLOCK_TABLE();
+		PMAP_UNLOCK(pvo->pvo_pmap);
 	}
 
-	vm_page_unlock_queues();
+	UNLOCK_TABLE_RD();
 	return (count);
 }
 
@@ -2501,6 +2547,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_o
 	vm_offset_t ppa;
 	int error = 0;
 
+	LOCK_TABLE_RD();
 	PMAP_LOCK(kernel_pmap);
 	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
 		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
@@ -2510,6 +2557,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_o
 			break;
 		}
 	}
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(kernel_pmap);
 
 	return (error);
@@ -2528,7 +2576,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_offset_
 
 	ppa = trunc_page(pa);
 	offset = pa & PAGE_MASK;
-	size = roundup(offset + size, PAGE_SIZE);
+	size = roundup2(offset + size, PAGE_SIZE);
 
 	va = kmem_alloc_nofault(kernel_map, size);
 
@@ -2559,7 +2607,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t v
 
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
-	size = roundup(offset + size, PAGE_SIZE);
+	size = roundup2(offset + size, PAGE_SIZE);
 
 	kmem_free(kernel_map, base, size);
 }
@@ -2572,6 +2620,7 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm,
 	vm_paddr_t pa;
 	vm_size_t len;
 
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
 	while (sz > 0) {
 		lim = round_page(va);
@@ -2585,5 +2634,6 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm,
 		va += len;
 		sz -= len;
 	}
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 }

Modified: stable/9/sys/powerpc/aim/moea64_native.c
==============================================================================
--- stable/9/sys/powerpc/aim/moea64_native.c	Thu Apr  5 00:08:16 2012	(r233910)
+++ stable/9/sys/powerpc/aim/moea64_native.c	Thu Apr  5 00:53:21 2012	(r233911)
@@ -103,6 +103,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 
@@ -138,7 +139,7 @@ __FBSDID("$FreeBSD$");
  * Just to add to the fun, exceptions must be off as well
  * so that we can't trap in 64-bit mode. What a pain.
  */
-struct mtx	tlbie_mutex;
+static struct mtx	tlbie_mutex;
 
 static __inline void
 TLBIE(uint64_t vpn) {
@@ -151,19 +152,20 @@ TLBIE(uint64_t vpn) {
 	vpn <<= ADDR_PIDX_SHFT;
 	vpn &= ~(0xffffULL << 48);
 
-	mtx_lock_spin(&tlbie_mutex);
 #ifdef __powerpc64__
-	__asm __volatile("\
-	    ptesync; \
-	    tlbie %0; \
-	    eieio; \
-	    tlbsync; \
-	    ptesync;" 
-	:: "r"(vpn) : "memory");
+	sched_pin();

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
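
  The TLBIE() hunk is cut off above; what remains visible is the switch
  from holding a spin mutex across the whole invalidation sequence to
  pinning the thread with sched_pin(9) for its duration. A generic
  sketch of that pattern only, not the literal replacement, which the
  truncation hides:

      #include <sys/param.h>
      #include <sys/sched.h>

      static void
      example_invalidate(void)
      {
              sched_pin();    /* stay on this CPU across the sequence */
              /* ... ptesync/tlbie/tlbsync-style CPU-local sequence ... */
              sched_unpin();
      }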


