Date:      Tue, 20 Sep 2011 08:11:07 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r225691 - in user/attilio/vmcontention/sys/sparc64: include sparc64
Message-ID:  <201109200811.p8K8B7af017443@svn.freebsd.org>

Author: attilio
Date: Tue Sep 20 08:11:07 2011
New Revision: 225691
URL: http://svn.freebsd.org/changeset/base/225691

Log:
  Replace the misuse of vm_page_queue_lock with a global MD page rwlock
  that protects the TTE lists and the page coloring bits.
  Additionally, move some calls to vm_page_aflag_clear() out of the
  locked regions, as that locking is no longer necessary.
  
  Discussed with:	alc
  Reviewed by:	alc
  Tested by:	flo
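
To illustrate the change: operations that mutate a page's TTE list or
coloring state now take the new global rwlock exclusively, while read-only
queries can share it, instead of everything serializing on the page queues
mutex.  A minimal sketch of the two modes, using the MDPAGE_* macros added
to pmap.h below (the example_* function names are hypothetical, not part
of the commit):

	/* Mutator: the TTE list may only be edited under the write lock. */
	static void
	example_modify(vm_page_t m)
	{

		MDPAGE_WLOCK();
		/* ... insert or remove entries on m->md.tte_list ... */
		MDPAGE_WUNLOCK();
	}

	/* Query: a read lock suffices to walk the TTE list. */
	static boolean_t
	example_mapped(vm_page_t m)
	{
		boolean_t rv;

		MDPAGE_RLOCK();
		rv = !TAILQ_EMPTY(&m->md.tte_list);
		MDPAGE_RUNLOCK();
		return (rv);
	}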

Modified:
  user/attilio/vmcontention/sys/sparc64/include/pmap.h
  user/attilio/vmcontention/sys/sparc64/sparc64/pmap.c
  user/attilio/vmcontention/sys/sparc64/sparc64/tsb.c

Modified: user/attilio/vmcontention/sys/sparc64/include/pmap.h
==============================================================================
--- user/attilio/vmcontention/sys/sparc64/include/pmap.h	Tue Sep 20 08:01:43 2011	(r225690)
+++ user/attilio/vmcontention/sys/sparc64/include/pmap.h	Tue Sep 20 08:11:07 2011	(r225691)
@@ -43,6 +43,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_rwlock.h>
 #include <machine/cache.h>
 #include <machine/tte.h>
 
@@ -78,6 +79,12 @@ struct pmap {
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
+#define	MDPAGE_ASSERT_WLOCKED()	rw_assert(&md_page_rwlock, RA_WLOCKED)
+#define	MDPAGE_RLOCK()		rw_rlock(&md_page_rwlock)
+#define	MDPAGE_WLOCK()		rw_wlock(&md_page_rwlock)
+#define	MDPAGE_RUNLOCK()	rw_runlock(&md_page_rwlock)
+#define	MDPAGE_WUNLOCK()	rw_wunlock(&md_page_rwlock)
+
 #define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
 #define	pmap_page_set_memattr(m, ma)	(void)0
 
@@ -101,6 +108,7 @@ void	pmap_set_kctx(void);
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
+extern	struct rwlock md_page_rwlock;
 extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;

Modified: user/attilio/vmcontention/sys/sparc64/sparc64/pmap.c
==============================================================================
--- user/attilio/vmcontention/sys/sparc64/sparc64/pmap.c	Tue Sep 20 08:01:43 2011	(r225690)
+++ user/attilio/vmcontention/sys/sparc64/sparc64/pmap.c	Tue Sep 20 08:11:07 2011	(r225691)
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -188,6 +189,11 @@ extern int tl1_immu_miss_patch_tsb_mask_
 extern int tl1_immu_miss_patch_tsb_mask_2[];
 
 /*
+ * MD page read/write lock.
+ */
+struct rwlock md_page_rwlock;
+
+/*
  * If a user pmap is processed with pmap_remove and the
  * resident count drops to 0, there are no more pages to remove, so we
  * need not continue.
@@ -679,6 +685,11 @@ pmap_bootstrap(u_int cpu_impl)
 	CPU_FILL(&pm->pm_active);
 
 	/*
+	 * Initialize the MD page lock.
+	 */
+	rw_init(&md_page_rwlock, "MD page");
+
+	/*
 	 * Flush all non-locked TLB entries possibly left over by the
 	 * firmware.
 	 */
@@ -888,7 +899,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_cache_enter: fake page"));
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -963,7 +974,7 @@ pmap_cache_remove(vm_page_t m, vm_offset
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
 	    m->md.colors[DCACHE_COLOR(va)]);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1038,7 +1049,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 	vm_page_t om;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_STATS_INC(pmap_nkenter);
 	tp = tsb_kvtotte(va);
 	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1100,7 +1111,7 @@ pmap_kremove(vm_offset_t va)
 	struct tte *tp;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_STATS_INC(pmap_nkremove);
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1151,19 +1162,16 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqenter);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	while (count-- > 0) {
 		pmap_kenter(va, *m);
 		va += PAGE_SIZE;
 		m++;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1175,18 +1183,15 @@ void
 pmap_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqremove);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1327,7 +1332,7 @@ pmap_remove_tte(struct pmap *pm, struct 
 	vm_page_t m;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	data = atomic_readandclear_long(&tp->tte_data);
 	if ((data & TD_FAKE) == 0) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1364,7 +1369,7 @@ pmap_remove(pmap_t pm, vm_offset_t start
 	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	if (end - start > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1377,7 +1382,7 @@ pmap_remove(pmap_t pm, vm_offset_t start
 		tlb_range_demap(pm, start, end - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1390,7 +1395,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
@@ -1412,8 +1417,8 @@ pmap_remove_all(vm_page_t m)
 		TTE_ZERO(tp);
 		PMAP_UNLOCK(pm);
 	}
+	MDPAGE_WUNLOCK();
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 static int
@@ -1475,11 +1480,11 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot, wired);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 /*
@@ -1498,7 +1503,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	vm_page_t real;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1641,27 +1646,27 @@ pmap_enter_object(pmap_t pm, vm_offset_t
 
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1706,6 +1711,8 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t ds
 	vm_page_t m;
 	u_long data;
 
+	MDPAGE_ASSERT_WLOCKED();
+
 	if ((tp->tte_data & TD_FAKE) != 0)
 		return (1);
 	if (tsb_tte_lookup(dst_pmap, va) == NULL) {
@@ -1726,7 +1733,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 
 	if (dst_addr != src_addr)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -1744,9 +1751,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1943,7 +1950,7 @@ pmap_page_exists_quick(pmap_t pm, vm_pag
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1954,7 +1961,7 @@ pmap_page_exists_quick(pmap_t pm, vm_pag
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -1971,11 +1978,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
 			count++;
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (count);
 }
 
@@ -2002,13 +2009,13 @@ pmap_page_is_mapped(vm_page_t m)
 	rv = FALSE;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (rv);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & TD_PV) != 0) {
 			rv = TRUE;
 			break;
 		}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2034,7 +2041,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -2048,7 +2055,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
-	vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	return (count);
 }
 
@@ -2071,7 +2078,7 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2080,7 +2087,7 @@ pmap_is_modified(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2114,7 +2121,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2123,7 +2130,7 @@ pmap_is_referenced(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2146,7 +2153,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2154,7 +2161,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 }
 
 void
@@ -2165,7 +2172,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2173,7 +2180,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 }
 
 void
@@ -2194,7 +2201,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2204,8 +2211,8 @@ pmap_remove_write(vm_page_t m)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 		}
 	}
+	MDPAGE_RUNLOCK();
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 int

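A note on the ordering this revision establishes (visible in pmap_enter(),
pmap_copy(), and pmap_remove_all() above): the global MD page lock is
acquired before any per-pmap lock and released after it, and PGA_WRITEABLE
is now cleared outside the lock.  A minimal sketch of the pattern, not a
verbatim excerpt:

	MDPAGE_WLOCK();
	PMAP_LOCK(pm);
	/* ... modify the mappings and the page's TTE list ... */
	PMAP_UNLOCK(pm);
	MDPAGE_WUNLOCK();
	/* Per the log message, this no longer requires the lock held. */
	vm_page_aflag_clear(m, PGA_WRITEABLE);
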
Modified: user/attilio/vmcontention/sys/sparc64/sparc64/tsb.c
==============================================================================
--- user/attilio/vmcontention/sys/sparc64/sparc64/tsb.c	Tue Sep 20 08:01:43 2011	(r225690)
+++ user/attilio/vmcontention/sys/sparc64/sparc64/tsb.c	Tue Sep 20 08:11:07 2011	(r225691)
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -131,7 +132,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm
 			PMAP_STATS_INC(tsb_nenter_u_oc);
 	}
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	if (pm == kernel_pmap) {
 		PMAP_STATS_INC(tsb_nenter_k);


