Date:      Sun, 7 Mar 2004 16:23:50 -0800 (PST)
From:      Juli Mallett <jmallett@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 48381 for review
Message-ID:  <200403080023.i280NoNe041323@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=48381

Change 48381 by jmallett@jmallett_oingo on 2004/03/07 16:23:32

	Try to integrate from the alpha pmap: add uma_small_alloc() and
	an alpha-style pmap_pinit() (the latter still under '#if notyet'),
	merge pmap_pinit2() into pmap_pinit(), flesh out the PTE
	validation path in pmap_enter(), and take the page queues lock
	around the page queries in pmap_mincore().

Affected files ...

.. //depot/projects/mips/sys/mips/mips/pmap.c#29 edit

Differences ...

==== //depot/projects/mips/sys/mips/mips/pmap.c#29 (text+ko) ====

@@ -44,7 +44,6 @@
  *	from:	i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
  *		with some ideas from NetBSD's alpha pmap
  *      from:   Alpha Id: pmap.c,v 1.122 2003/04/10 18:42:06 jhb Exp
- * $FreeBSD$
  */
 
 /*
@@ -73,6 +72,9 @@
  *	and to when physical maps must be made correct.
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -196,7 +198,6 @@
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
 		vm_page_t mpte, vm_page_t m);
 
-
 /*
  * 	Routine:	pmap_pte
  * 	Function:
@@ -336,9 +337,41 @@
 	return (va);
 }
 
-/*
- * A free function for the above.
- */
+void *
+uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+{
+	static vm_pindex_t color;
+	vm_page_t m;
+	int pflags;
+	void *va;
+
+	*flags = UMA_SLAB_PRIV;
+
+	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
+		pflags = VM_ALLOC_INTERRUPT;
+	else
+		pflags = VM_ALLOC_SYSTEM;
+
+	if (wait & M_ZERO)
+		pflags |= VM_ALLOC_ZERO;
+
+	for (;;) {
+		m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
+		if (m == NULL) {
+			if (wait & M_NOWAIT)
+				return (NULL);
+			else
+				VM_WAIT;
+		} else
+			break;
+	}
+
+	va = (void *)MIPS_PHYS_TO_KSEG0(m->phys_addr);
+	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
+		bzero(va, PAGE_SIZE);
+	return (va);
+}
+
 void
 uma_small_free(void *mem, int size, u_int8_t flags)
 {
@@ -677,7 +710,38 @@
 pmap_pinit(pmap)
 	register struct pmap *pmap;
 {
-	pmap->pm_lev1 = kptmap; /* XXX */
+#if notyet
+	vm_page_t lev1pg;
+	int i;
+
+	/*
+	 * allocate object for the ptes
+	 */
+	if (pmap->pm_pteobj == NULL)
+		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, NUSERLEV3MAPS + NUSERLEV2MAPS + 1);
+
+	/*
+	 * allocate the page directory page
+	 */
+	VM_OBJECT_LOCK(pmap->pm_pteobj);
+	lev1pg = vm_page_grab(pmap->pm_pteobj, NUSERLEV3MAPS + NUSERLEV2MAPS,
+	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
+
+	vm_page_lock_queues();
+	vm_page_flag_clear(lev1pg, PG_BUSY);
+	lev1pg->valid = VM_PAGE_BITS_ALL;
+	vm_page_unlock_queues();
+	VM_OBJECT_UNLOCK(pmap->pm_pteobj);
+
+	pmap->pm_lev1 = (pt_entry_t*) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(lev1pg));
+	if ((lev1pg->flags & PG_ZERO) == 0)
+		bzero(pmap->pm_lev1, PAGE_SIZE);
+
+
+	/* install self-referential address mapping entry (not PG_ASM) */
+	pmap->pm_lev1[PTLEV1I] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg))
+		| PG_V | PG_KRE | PG_KWE;
+#endif
 	pmap->pm_ptphint = NULL;
 	pmap->pm_active = 0;
 	pmap->pm_asid = 0;
@@ -687,18 +751,9 @@
 	mtx_lock_spin(&allpmaps_lock);
 	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 	mtx_unlock_spin(&allpmaps_lock);
-}
-
-/*
- * Wire in kernel global address entries.  To avoid a race condition
- * between pmap initialization and pmap_growkernel, this procedure
- * should be called after the vmspace is attached to the process
- * but before this pmap is activated.
- */
-void
-pmap_pinit2(pmap)
-	struct pmap *pmap;
-{
+#if notyet
+	bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE);
+#endif
 }
 
 /***************************************************
@@ -1147,7 +1202,37 @@
 	if (wired)
 		pmap->pm_stats.wired_count++;
 
-	tlb_enter(pmap, va, pa, PG_V | (wired ? PG_W : 0));
+validate:
+	/*
+	 * Now validate mapping with desired protection/wiring.
+	 */
+	newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed;
+
+	if (managed) {
+		/*
+		 * Set up referenced/modified emulation for the new
+		 * mapping. Any old referenced/modified emulation
+		 * results for the old mapping will have been recorded
+		 * either in pmap_remove_pte() or above in the code
+		 * which handles protection and/or wiring changes.
+		 */
+		newpte |= (PG_FOR | PG_FOW | PG_FOE);
+	}
+
+	if (wired)
+		newpte |= PG_W;
+
+	/*
+	 * if the mapping or permission bits are different, we need
+	 * to update the pte.
+	 */
+	if (origpte != newpte) {
+		*pte = newpte;
+		if (origpte)
+			pmap_invalidate_page(pmap, va);
+		if (prot & VM_PROT_EXECUTE)
+			alpha_pal_imb();
+	}
 }
 
 /*
@@ -1608,23 +1693,30 @@
 		 */
 		if ((*pte & PG_RO) == 0)
 			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-		/*
-		 * Modified by someone
-		 */
-		else if (m->dirty || pmap_is_modified(m))
-			val |= MINCORE_MODIFIED_OTHER;
+		else {
+			/*
+			 * Modified by someone
+			 */
+			vm_page_lock_queues();
+			if (m->dirty || pmap_is_modified(m))
+				val |= MINCORE_MODIFIED_OTHER;
+			vm_page_unlock_queues();
+		}
 		/*
 		 * Referenced by us
 		 */
 		if ((*pte & PG_D) == 0)
 			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-
-		/*
-		 * Referenced by someone
-		 */
-		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
-			val |= MINCORE_REFERENCED_OTHER;
-			vm_page_flag_set(m, PG_REFERENCED);
+		else {
+			/*
+			 * Referenced by someone
+			 */
+			vm_page_lock_queues();
+			if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
+				val |= MINCORE_REFERENCED_OTHER;
+				vm_page_flag_set(m, PG_REFERENCED);
+			}
+			vm_page_unlock_queues();
 		}
 	} 
 	return val;
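
Notes ...

The new uma_small_alloc() can hand a freshly allocated vm_page straight
to UMA only because of the MIPS KSEG0 direct map: physical addresses in
the low 512MB are permanently mapped, cached, at a fixed virtual offset,
so using the page needs no PTE and no TLB traffic.  A minimal sketch of
that mapping, assuming the standard 32-bit MIPS layout (the macro names
mirror the ones the diff uses):

	#include <stdint.h>

	/* KSEG0: the cached, unmapped window onto the low 512MB of RAM. */
	#define	MIPS_KSEG0_START	0x80000000UL

	#define	MIPS_PHYS_TO_KSEG0(pa)	((uintptr_t)(pa) | MIPS_KSEG0_START)
	#define	MIPS_KSEG0_TO_PHYS(va)	((uintptr_t)(va) & 0x1fffffffUL)

uma_small_free() recovers the physical address the same way and hands
the page back to the VM system; the pair is the machine-dependent slab
back end UMA uses when UMA_MD_SMALL_ALLOC is defined, as the alpha pmap
this change integrates from does.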

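The PG_FOR/PG_FOW/PG_FOE bits and the alpha_pal_imb() call in the
pmap_enter() hunk are alpha-isms that came along with the integration
and will need MIPS equivalents.  On alpha they implement software
referenced/modified emulation: the bits make the first read, write, or
execute of a page trap, and the trap handler records the access before
disarming them.  A rough sketch of the scheme (bit values follow the
Alpha PTE layout; the helper is hypothetical -- on alpha the real work
happens in pmap_emulate_reference()):

	#include <stdint.h>

	typedef uint64_t pt_entry_t;

	#define	PG_FOR	0x0002		/* fault on read */
	#define	PG_FOW	0x0004		/* fault on write */
	#define	PG_FOE	0x0008		/* fault on execute */

	/*
	 * The first access traps; record it, disarm the bits, and
	 * retry the instruction.  The caller also marks the vm_page
	 * referenced (and dirty, for a write).
	 */
	static void
	rm_emulation_fault(pt_entry_t *pte, int write)
	{
		*pte &= ~(PG_FOR | PG_FOE);	/* now known-referenced */
		if (write)
			*pte &= ~PG_FOW;	/* now known-modified */
	}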

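Finally, the reworked pmap_mincore() checks: m->dirty, the
PG_REFERENCED flag, and the pv-list walks inside pmap_is_modified()
and pmap_ts_referenced() are all protected by the page queues lock in
this vintage of the VM system, so the bare "else if" arms become
blocks bracketed by vm_page_lock_queues()/vm_page_unlock_queues().
The returned MINCORE_* bits are unchanged; only the locking is new.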
