Date:      Sat, 22 Sep 2012 06:54:03 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r240803 - head/sys/arm/arm
Message-ID:  <201209220654.q8M6s3Fx009328@svn.freebsd.org>

Author: alc
Date: Sat Sep 22 06:54:03 2012
New Revision: 240803
URL: http://svn.freebsd.org/changeset/base/240803

Log:
  Since UMA_ZONE_NOFREE is specified when l2zone and l2table_zone are created,
  there is no need to release and reacquire the pmap and pvh global locks
  around calls to uma_zfree().  Recursion into the pmap simply won't occur.
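  
  The resulting flow in pmap_alloc_l2_bucket() (a rough sketch of the new code,
  with names as in the diff below) keeps both locks held across the free in the
  lost-race path; this is safe only because the zones are NOFREE, so uma_zfree()
  cannot recurse into the pmap:
  
  	PMAP_UNLOCK(pm);
  	rw_wunlock(&pvh_global_lock);
  	l2 = uma_zalloc(l2table_zone, M_NOWAIT);  /* locks still dropped here */
  	rw_wlock(&pvh_global_lock);
  	PMAP_LOCK(pm);
  	if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
  		/* Lost the race; free our copy without dropping either lock. */
  		uma_zfree(l2table_zone, l2);
  		l2 = pm->pm_l2[L2_IDX(l1idx)];
  	}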
  
  Eliminate the use of M_USE_RESERVE.  It is deprecated and, in fact, counter-
  productive, meaning that it actually makes the memory allocation request
  more likely to fail.
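  
  Concretely, this amounts to dropping the flag from the non-sleeping zone
  allocations, e.g. (mirroring the diff below):
  
  	/* Before: deprecated flag; as noted above, it made the request more
  	   likely to fail. */
  	ptep = (void *)uma_zalloc(l2zone, M_NOWAIT | M_USE_RESERVE);
  	/* After: a plain non-sleeping allocation. */
  	ptep = uma_zalloc(l2zone, M_NOWAIT);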
  
  Eliminate the macros pmap_{alloc,free}_l2_dtable().  They are of limited
  utility, and pmap_free_l2_dtable() was inconsistently used.
  
  Tidy up pmap_init().  In particular, change the initialization of the PV
  zone so that it doesn't span the initialization of the l2 and l2table zones.
  
  Tested by:	jmg

Modified:
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm/arm/pmap.c

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Sat Sep 22 06:41:56 2012	(r240802)
+++ head/sys/arm/arm/pmap-v6.c	Sat Sep 22 06:54:03 2012	(r240803)
@@ -357,14 +357,6 @@ struct l2_dtable {
 #define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
 
 /*
- * L2 allocation.
- */
-#define	pmap_alloc_l2_dtable()		\
-		(void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
-#define	pmap_free_l2_dtable(l2)		\
-		uma_zfree(l2table_zone, l2)
-
-/*
  * We try to map the page tables write-through, if possible.  However, not
  * all CPUs have a write-through cache mode, so on those we have to sync
  * the cache when we frob page tables.
@@ -621,10 +613,9 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offse
 		 * no entry in the L1 table.
 		 * Need to allocate a new l2_dtable.
 		 */
-again_l2table:
 		PMAP_UNLOCK(pm);
 		rw_wunlock(&pvh_global_lock);
-		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
+		if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pm);
 			return (NULL);
@@ -632,18 +623,12 @@ again_l2table:
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pm);
 		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
-			PMAP_UNLOCK(pm);
-			rw_wunlock(&pvh_global_lock);
-			uma_zfree(l2table_zone, l2);
-			rw_wlock(&pvh_global_lock);
-			PMAP_LOCK(pm);
-			l2 = pm->pm_l2[L2_IDX(l1idx)];
-			if (l2 == NULL)
-				goto again_l2table;
 			/*
 			 * Someone already allocated the l2_dtable while
 			 * we were doing the same.
 			 */
+			uma_zfree(l2table_zone, l2);
+			l2 = pm->pm_l2[L2_IDX(l1idx)];
 		} else {
 			bzero(l2, sizeof(*l2));
 			/*
@@ -665,21 +650,14 @@ again_l2table:
 		 * No L2 page table has been allocated. Chances are, this
 		 * is because we just allocated the l2_dtable, above.
 		 */
-again_ptep:
 		PMAP_UNLOCK(pm);
 		rw_wunlock(&pvh_global_lock);
-		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
+		ptep = uma_zalloc(l2zone, M_NOWAIT);
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pm);
 		if (l2b->l2b_kva != 0) {
 			/* We lost the race. */
-			PMAP_UNLOCK(pm);
-			rw_wunlock(&pvh_global_lock);
 			uma_zfree(l2zone, ptep);
-			rw_wlock(&pvh_global_lock);
-			PMAP_LOCK(pm);
-			if (l2b->l2b_kva == 0)
-				goto again_ptep;
 			return (l2b);
 		}
 		l2b->l2b_phys = vtophys(ptep);
@@ -691,7 +669,7 @@ again_ptep:
 			 */
 			if (l2->l2_occupancy == 0) {
 				pm->pm_l2[L2_IDX(l1idx)] = NULL;
-				pmap_free_l2_dtable(l2);
+				uma_zfree(l2table_zone, l2);
 			}
 			return (NULL);
 		}
@@ -789,7 +767,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
 	 * the pointer in the parent pmap and free the l2_dtable.
 	 */
 	pm->pm_l2[L2_IDX(l1idx)] = NULL;
-	pmap_free_l2_dtable(l2);
+	uma_zfree(l2table_zone, l2);
 }
 
 /*
@@ -1175,28 +1153,25 @@ pmap_init(void)
 
 	PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));
 
+	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
+	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
+	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+
 	/*
-	 * init the pv free list
+	 * Initialize the PV entry allocator.
 	 */
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	/*
-	 * Now it is safe to enable pv_table recording.
-	 */
-	PDEBUG(1, printf("pmap_init: done!\n"));
-
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-
 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
-	pv_entry_high_water = 9 * (pv_entry_max / 10);
-	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
-	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
-	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
-
 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
+	/*
+	 * Now it is safe to enable pv_table recording.
+	 */
+	PDEBUG(1, printf("pmap_init: done!\n"));
 }
 
 int
@@ -2544,7 +2519,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 }
 
 /*
- *	The page queues and pmap must be locked.
+ *	The pvh global and pmap locks must be held.
  */
 static void
 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Sat Sep 22 06:41:56 2012	(r240802)
+++ head/sys/arm/arm/pmap.c	Sat Sep 22 06:54:03 2012	(r240803)
@@ -366,14 +366,6 @@ struct l2_dtable {
 #define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
 
 /*
- * L2 allocation.
- */
-#define	pmap_alloc_l2_dtable()		\
-		(void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
-#define	pmap_free_l2_dtable(l2)		\
-		uma_zfree(l2table_zone, l2)
-
-/*
  * We try to map the page tables write-through, if possible.  However, not
  * all CPUs have a write-through cache mode, so on those we have to sync
  * the cache when we frob page tables.
@@ -875,10 +867,9 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offse
 		 * no entry in the L1 table.
 		 * Need to allocate a new l2_dtable.
 		 */
-again_l2table:
 		PMAP_UNLOCK(pm);
 		rw_wunlock(&pvh_global_lock);
-		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
+		if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pm);
 			return (NULL);
@@ -886,18 +877,12 @@ again_l2table:
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pm);
 		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
-			PMAP_UNLOCK(pm);
-			rw_wunlock(&pvh_global_lock);
-			uma_zfree(l2table_zone, l2);
-			rw_wlock(&pvh_global_lock);
-			PMAP_LOCK(pm);
-			l2 = pm->pm_l2[L2_IDX(l1idx)];
-			if (l2 == NULL)
-				goto again_l2table;
 			/*
 			 * Someone already allocated the l2_dtable while
 			 * we were doing the same.
 			 */
+			uma_zfree(l2table_zone, l2);
+			l2 = pm->pm_l2[L2_IDX(l1idx)];
 		} else {
 			bzero(l2, sizeof(*l2));
 			/*
@@ -919,21 +904,14 @@ again_l2table:
 		 * No L2 page table has been allocated. Chances are, this
 		 * is because we just allocated the l2_dtable, above.
 		 */
-again_ptep:
 		PMAP_UNLOCK(pm);
 		rw_wunlock(&pvh_global_lock);
-		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
+		ptep = uma_zalloc(l2zone, M_NOWAIT);
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pm);
 		if (l2b->l2b_kva != 0) {
 			/* We lost the race. */
-			PMAP_UNLOCK(pm);
-			rw_wunlock(&pvh_global_lock);
 			uma_zfree(l2zone, ptep);
-			rw_wlock(&pvh_global_lock);
-			PMAP_LOCK(pm);
-			if (l2b->l2b_kva == 0)
-				goto again_ptep;
 			return (l2b);
 		}
 		l2b->l2b_phys = vtophys(ptep);
@@ -945,7 +923,7 @@ again_ptep:
 			 */
 			if (l2->l2_occupancy == 0) {
 				pm->pm_l2[L2_IDX(l1idx)] = NULL;
-				pmap_free_l2_dtable(l2);
+				uma_zfree(l2table_zone, l2);
 			}
 			return (NULL);
 		}
@@ -1066,7 +1044,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
 	 * the pointer in the parent pmap and free the l2_dtable.
 	 */
 	pm->pm_l2[L2_IDX(l1idx)] = NULL;
-	pmap_free_l2_dtable(l2);
+	uma_zfree(l2table_zone, l2);
 }
 
 /*
@@ -1834,28 +1812,25 @@ pmap_init(void)
 
 	PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));
 
+	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
+	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
+	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+
 	/*
-	 * init the pv free list
+	 * Initialize the PV entry allocator.
 	 */
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	/*
-	 * Now it is safe to enable pv_table recording.
-	 */
-	PDEBUG(1, printf("pmap_init: done!\n"));
-
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	
 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
-	pv_entry_high_water = 9 * (pv_entry_max / 10);
-	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
-	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
-	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
-
 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
+	/*
+	 * Now it is safe to enable pv_table recording.
+	 */
+	PDEBUG(1, printf("pmap_init: done!\n"));
 }
 
 int
@@ -3302,7 +3277,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 }
 
 /*
- *	The page queues and pmap must be locked.
+ *	The pvh global and pmap locks must be held.
  */
 static void
 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,


