Date:      Wed, 7 Jan 2009 13:55:43 -0600 (CST)
From:      Mark Tinguely <tinguely@casselton.net>
To:        raj@semihalf.com, tinguely@casselton.net
Cc:        arm@freebsd.org, alfred@freebsd.org
Subject:   Re: svn commit: r186730 - in head...
Message-ID:  <200901071955.n07Jthqu057051@casselton.net>
In-Reply-To: <4964FB53.8040607@semihalf.com>

<deleted background>

>  Mark,
>  I'd like to look at your code, I guess also Grzegorz (CC'd) who knows ARM pmap
>  code very well will be much interested in your thoughts on resolving the
>  multiple virtual mappings issue, once and for all. How can we access your said
>  proto/rough attempts?
>
>  Rafal

Here is the "svn diff" from a week or so ago. The url is below in case
the inclusion adds some extra characters.

	http://www.casselton.net/~tinguely/arm_pmap_unmanage.diff

pmap_fix_cache() (and the equivalent FreeBSD 6/7 and NetBSD pmap_vac_me_XXX
routines) should be called whenever a mapping is made. It checks for
situations where a page ends up with either multiple writable mappings or
a writable mapping alongside readers. I won't go into the side cases. The
point is that when a page has a writer plus a reader, or multiple writers,
we have to write back/invalidate and turn off caching for those mappings.

The problem is that kernel mappings (made via pmap_enter, pmap_qenter and
pmap_kenter) are not managed with pv_entrys. pmap_fix_cache() counts
pv_entrys to detect the multiple-writer and writer/reader situations, so
as things stand a writable unmanaged page can be missed. I think the real
problem is bigger: for entries mapped with the pmap_qenter and pmap_kenter
routines, the caches are simply left on.

What my code does is manage all kernel mappings with pv_entrys, so
pmap_fix_cache() can do its job.

This patch uses more pv_entrys, and new locking had to be added around
the pv_entry allocation routines.
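
To make the effect concrete before the diff, here is a second small
standalone model (again with made-up names such as visible_alias(); this
is only a sketch of the idea, not the patched code): while the kernel's
writable mapping has no pv_entry the alias check cannot see it, and once
the mapping is tracked (what the PVF_UNMAN pv_entrys in the patch provide)
the check fires as it should.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* A tracked mapping stands in for a pv_entry on the page's pv list. */
	struct mapping {
		bool writable;
		bool tracked;
	};

	/* Count only what the alias check can see: the tracked mappings. */
	static bool
	visible_alias(const struct mapping *maps, size_t n)
	{
		size_t seen = 0, writers = 0;

		for (size_t i = 0; i < n; i++) {
			if (!maps[i].tracked)
				continue;
			seen++;
			if (maps[i].writable)
				writers++;
		}
		return (writers > 1 || (writers == 1 && seen > 1));
	}

	int
	main(void)
	{
		struct mapping maps[] = {
			{ .writable = true,  .tracked = false }, /* pmap_kenter(), no pv_entry */
			{ .writable = false, .tracked = true  }, /* managed user mapping */
		};

		printf("without tracking: alias seen: %s\n",
		    visible_alias(maps, 2) ? "yes" : "no");	/* no */

		maps[0].tracked = true;	/* with the patch: PVF_UNMAN pv_entry */
		printf("with tracking:    alias seen: %s\n",
		    visible_alias(maps, 2) ? "yes" : "no");	/* yes */
		return (0);
	}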

			arm_pmap_unmanage.diff

Index: arm/arm/pmap.c
===================================================================
--- arm/arm/pmap.c	(revision 186394)
+++ arm/arm/pmap.c	(working copy)
@@ -407,7 +407,7 @@
 
 #define pmap_is_current(pm)	((pm) == pmap_kernel() || \
             curproc->p_vmspace->vm_map.pmap == (pm))
-static uma_zone_t pvzone;
+static uma_zone_t pvzone = NULL;
 uma_zone_t l2zone;
 static uma_zone_t l2table_zone;
 static vm_offset_t pmap_kernel_l2dtable_kva;
@@ -2713,8 +2713,8 @@
 	cpu_idcache_wbinv_all();
 	cpu_l2cache_wbinv_all();
 	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
-		if (pv->pv_flags & PVF_WIRED) {
-			/* The page is wired, cannot remove it now. */
+		if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) {
+			/* The page is wired or unmanaged, cannot remove it now. */
 			npv = TAILQ_NEXT(pv, pv_plist);
 			continue;
 		}
@@ -2824,10 +2824,12 @@
 	struct l2_bucket *l2b;
 	pt_entry_t *pte;
 	pt_entry_t opte;
+	struct pv_entry *pve;
+	vm_page_t m;
+
 	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
 	    (uint32_t) va, (uint32_t) pa));
 
-
 	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 	if (l2b == NULL)
 		l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
@@ -2837,10 +2839,10 @@
 	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
 	    (uint32_t) pte, opte, *pte));
 	if (l2pte_valid(opte)) {
-		cpu_dcache_wbinv_range(va, PAGE_SIZE);
-		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
-		cpu_tlb_flushD_SE(va);
-		cpu_cpwait();
+		/* Remove the old mapping.
+		 * Note: pmap_kremove() duplicates a lot of this code.
+		 */
+		pmap_kremove(va);
 	} else {
 		if (opte == 0)
 			l2b->l2b_occupancy++;
@@ -2852,6 +2854,27 @@
 	if (flags & KENTER_USER)
 		*pte |= L2_S_PROT_U;
 	PTE_SYNC(pte);
+
+	/* Kernel direct mappings can be shared, so use a pv_entry
+	 * to ensure proper caching.
+	 *
+	 * With the old UMA pv allocation method, it is possible
+	 * that this routine is called before UMA is initialized.
+	 * Early allocations should not be shared.
+	 * This can change with "chunk" style pv_entrys.
+	 */
+	if (pvzone != NULL) {
+		if ((pve = pmap_get_pv_entry()) == NULL)
+			panic("pmap_kenter_internal: no pv entries");	
+
+		vm_page_lock_queues();
+		PMAP_LOCK(pmap_kernel());
+		m = PHYS_TO_VM_PAGE(vtophys(va));
+		pmap_enter_pv(m, pve, pmap_kernel(), va, PVF_WRITE | PVF_UNMAN);
+		pmap_fix_cache(m, pmap_kernel(), va);
+		PMAP_UNLOCK(pmap_kernel());
+		vm_page_unlock_queues();
+	}
 }
 
 void
@@ -2888,6 +2911,9 @@
 {
 	struct l2_bucket *l2b;
 	pt_entry_t *pte, opte;
+	struct pv_entry *pve;
+	vm_page_t m;
+	vm_offset_t pa;
 		
 	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 	if (!l2b)
@@ -2896,6 +2922,25 @@
 	pte = &l2b->l2b_kva[l2pte_index(va)];
 	opte = *pte;
 	if (l2pte_valid(opte)) {
+		/* pa = vtophys(va), taken from pmap_extract() */
+		switch (opte & L2_TYPE_MASK) {
+		case L2_TYPE_L:
+			pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET);
+			break;
+		default:
+			pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET);
+			break;
+		}
+			/* note: should never have to remove an allocation
+			 * before the pvzone is initialized.
+			 */
+		vm_page_lock_queues();
+		PMAP_LOCK(pmap_kernel());	/* XXX only PVF_UNMAN? XXX */
+		if ((m = PHYS_TO_VM_PAGE(pa)) &&
+		    (pve = pmap_remove_pv(m, pmap_kernel(), va)))
+			pmap_free_pv_entry(pve); 
+		PMAP_UNLOCK(pmap_kernel());
+		vm_page_unlock_queues();
 		cpu_dcache_wbinv_range(va, PAGE_SIZE);
 		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
 		cpu_tlb_flushD_SE(va);
@@ -2917,7 +2962,7 @@
  *	update '*virt' with the first usable address after the mapped
  *	region.
  */
-vm_offset_t
+vm_offset_t 
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
 #ifdef ARM_USE_SMALL_ALLOC
@@ -3123,6 +3168,11 @@
 	pmap_remove_write(m);
 	curpm = vmspace_pmap(curproc->p_vmspace);
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+		/* don't expect to remove an unmanaged page */
+		if (pv->pv_flags & PVF_UNMAN) {
+			printf("pmap_remove_all: Unmanaged page %08x\n",
+				(u_int32_t) m);
+		}
 		if (flush == FALSE && (pv->pv_pmap == curpm ||
 		    pv->pv_pmap == pmap_kernel()))
 			flush = TRUE;
@@ -3418,16 +3468,16 @@
 			 * Replacing an existing mapping with a new one.
 			 * It is part of our managed memory so we
 			 * must remove it from the PV list
+			 *
+			 * unmanaged kernel memory also will have a pv_entry.
 			 */
 			pve = pmap_remove_pv(opg, pmap, va);
-			if (m && (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) &&
-			    pve)
+			if (m && (m->flags & PG_FICTITIOUS) && pve)
 				pmap_free_pv_entry(pve);
-			else if (!pve && 
-			    !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS)))
+			else if (!pve && !(m->flags & PG_FICTITIOUS))
 				pve = pmap_get_pv_entry();
-			KASSERT(pve != NULL || m->flags & (PG_UNMANAGED | 
-			    PG_FICTITIOUS), ("No pv"));
+			KASSERT(pve != NULL || m->flags & PG_FICTITIOUS,
+			    ("No pv"));
 			oflags = pve->pv_flags;
 			
 			/*
@@ -3436,8 +3486,7 @@
 			 * initially) then make sure to frob
 			 * the cache.
 			 */
-			if ((oflags & PVF_NC) == 0 &&
-			    l2pte_valid(opte)) {
+			if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
 				if (PV_BEEN_EXECD(oflags)) {
 					pmap_idcache_wbinv_range(pmap, va,
 					    PAGE_SIZE);
@@ -3448,13 +3497,15 @@
 						    (oflags & PVF_WRITE) == 0);
 					}
 			}
-		} else if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS)))
+		} else if (m && !(m->flags & PG_FICTITIOUS))
 			if ((pve = pmap_get_pv_entry()) == NULL) {
 				panic("pmap_enter: no pv entries");	
 			}
-		if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) {
+		if (m && !(m->flags & PG_FICTITIOUS)) {
 			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 			    ("pmap_enter: managed mapping within the clean submap"));
+			if (m->flags & PG_UNMANAGED)
+				nflags |= PVF_UNMAN;
 			pmap_enter_pv(m, pve, pmap, va, nflags);
 		}
 	}
Index: arm/include/pmap.h
===================================================================
--- arm/include/pmap.h	(revision 186394)
+++ arm/include/pmap.h	(working copy)
@@ -495,6 +495,7 @@
 #define	PVF_EXEC	0x10		/* mapping is executable */
 #define	PVF_NC		0x20		/* mapping is non-cacheable */
 #define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
+#define	PVF_UNMAN	0x80		/* mapping is unmanaged */
 
 void vector_page_setprot(int);
 


