Date:      Tue, 1 Jan 2008 06:29:57 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 132224 for review
Message-ID:  <200801010629.m016TvZL013532@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=132224

Change 132224 by kmacy@pandemonium:kmacy:xen31 on 2008/01/01 06:29:42

	Reduce the delta between i386/pmap.c and xen/pmap.c.
	Fix some whitespace issues.
	This allows us to get partway into single-user mode.
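	
	For reference, the largest functional piece here is an interface change
	to pd_set()/PD_SET_VA(): callers now pass the page-directory index and
	pd_set() derives the machine addresses of both pm_pdir[i] and the
	shadow entry pm_pdir_shadow[i] with vtomach(), instead of the caller
	handing in a pointer into pm_pdir.  A condensed before/after fragment
	(illustrative only, not a verbatim excerpt from the diff below):
	
		/* before: caller passes a pointer into pm_pdir */
		PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex],
		    (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
	
		/* after: caller passes the index; pd_set() computes the
		 * machine addresses of the entry and its shadow itself.
		 * ptema is the machine address, xpmap_ptom(ptepa). */
		PD_SET_VA_MA(pmap, ptepindex,
		    (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);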

Affected files ...

.. //depot/projects/xen31/sys/i386/xen/pmap.c#9 edit

Differences ...

==== //depot/projects/xen31/sys/i386/xen/pmap.c#9 (text+ko) ====

@@ -166,7 +166,7 @@
 #endif
 
 #if !defined(PMAP_DIAGNOSTIC)
-#define PMAP_INLINE __inline
+#define PMAP_INLINE	__gnu89_inline
 #else
 #define PMAP_INLINE
 #endif
@@ -295,7 +295,6 @@
 static void pmap_pte_release(pt_entry_t *pte);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-
 #ifdef PAE
 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
 #endif
@@ -304,26 +303,26 @@
 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
 
 void 
-pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type)
+pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
 {
-	vm_paddr_t shadow_pdir_ma = pmap->pm_pdir[PTDPTDI] & ~0xFFF;
-	vm_paddr_t shadow_offset = (vm_paddr_t)(ptr - pmap->pm_pdir)*sizeof(vm_paddr_t);
+	vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);
+	vm_paddr_t shadow_pdir_ma = vtomach(&pmap->pm_pdir_shadow[ptepindex]);
 	
 	switch (type) {
 	case SH_PD_SET_VA:
-		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
+		xen_queue_pt_update(shadow_pdir_ma,
 				    xpmap_ptom(val & ~(PG_RW|PG_M)));
-		xen_queue_pt_update(vtomach(ptr),
+		xen_queue_pt_update(pdir_ma,
 				    xpmap_ptom(val)); 	
 		break;
 	case SH_PD_SET_VA_MA:
-		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
+		xen_queue_pt_update(shadow_pdir_ma,
 				    val & ~(PG_RW|PG_M));
-		xen_queue_pt_update(vtomach(ptr), val); 	
+		xen_queue_pt_update(pdir_ma, val); 	
 		break;
 	case SH_PD_SET_VA_CLEAR:
-		xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 0);
-		xen_queue_pt_update(vtomach(ptr), 0); 	
+		xen_queue_pt_update(shadow_pdir_ma, 0);
+		xen_queue_pt_update(pdir_ma, 0); 	
 		break;
 	}
 }
@@ -926,22 +925,18 @@
 {
 	pd_entry_t newpf;
 	pd_entry_t *pde;
-	pd_entry_t tmppf;
-	
+
 	pde = pmap_pde(pmap, va);
-	if (PT_GET(pde) & PG_PS)
+	if (*pde & PG_PS)
 		return (pde);
-	if (PT_GET(pde) != 0) {
+	if (*pde != 0) {
 		/* are we current address space or kernel? */
 		if (pmap_is_current(pmap))
 			return (vtopte(va));
 		mtx_lock(&PMAP2mutex);
-		newpf = PT_GET(pde) & PG_FRAME;
-		tmppf = PT_GET(PMAP2) & PG_FRAME;
-		
-		if (tmppf != newpf) {
-			PT_SET_VA(PMAP2, newpf | PG_V | PG_A | PG_M/* XXX does PG_M cause problems? */, TRUE);
-
+		newpf = *pde & PG_FRAME;
+		if ((*PMAP2 & PG_FRAME) != newpf) {
+			PT_SET_VA_MA(PMAP2, newpf | PG_V | PG_A | PG_M, TRUE);
 			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 		}
 		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -984,8 +979,7 @@
 {
 	pd_entry_t newpf;
 	pd_entry_t *pde;
-	pd_entry_t tmppf;
-	
+
 	pde = pmap_pde(pmap, va);
 	if (*pde & PG_PS)
 		return (pde);
@@ -995,11 +989,9 @@
 			return (vtopte(va));
 		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
-		
-		newpf = PT_GET(pde) & PG_FRAME;
-		tmppf = PT_GET(PMAP1) & PG_FRAME;
-		if (tmppf != newpf) {
-			PT_SET_VA(PMAP1, newpf | PG_V | PG_A | PG_M/* ??? */, TRUE);
+		newpf = *pde & PG_FRAME;
+		if ((PT_GET(PMAP1) & PG_FRAME) != newpf) {
+			PT_SET_VA_MA(PMAP1, newpf | PG_V | PG_A, TRUE);
 #ifdef SMP
 			PMAP1cpu = PCPU_GET(cpuid);
 #endif
@@ -1034,10 +1026,10 @@
 
 	rtval = 0;
 	PMAP_LOCK(pmap);
-	pde  = PT_GET(&pmap->pm_pdir[va >> PDRSHIFT]);
+	pde = pmap->pm_pdir[va >> PDRSHIFT];
 	if (pde != 0) {
 		if ((pde & PG_PS) != 0) {
-			rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
+			rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
 			PMAP_UNLOCK(pmap);
 			return rtval;
 		}
@@ -1260,7 +1252,7 @@
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
-static PMAP_INLINE void
+static __inline void
 pmap_free_zero_pages(vm_page_t free)
 {
 	vm_page_t m;
@@ -1276,7 +1268,7 @@
  * This routine unholds page table pages, and if the hold count
  * drops to zero, then it decrements the wire count.
  */
-static PMAP_INLINE int
+static __inline int
 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 {
 
@@ -1296,7 +1288,7 @@
 	 * unmap the page table page
 	 */
 	xen_pt_unpin(pmap->pm_pdir[m->pindex]);	
-	PD_CLEAR_VA(pmap, &pmap->pm_pdir[m->pindex], TRUE);
+	PD_CLEAR_VA(pmap, m->pindex, TRUE);
 	--pmap->pm_stats.resident_count;
 
 	/*
@@ -1486,7 +1478,7 @@
 static vm_page_t
 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 {
-	vm_paddr_t ptepa;
+	vm_paddr_t ptema;
 	vm_page_t m;
 
 	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
@@ -1522,10 +1514,10 @@
 
 	pmap->pm_stats.resident_count++;
 
-	ptepa = VM_PAGE_TO_PHYS(m);
-	xen_pt_pin(xpmap_ptom(ptepa));
-	PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex], 
-		(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
+	ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m));
+	xen_pt_pin(ptema);
+	PD_SET_VA_MA(pmap, ptepindex,
+		(ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
 
 	return m;
 }
@@ -1534,7 +1526,7 @@
 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 {
 	unsigned ptepindex;
-	pd_entry_t ptepa;
+	pd_entry_t ptema;
 	vm_page_t m;
 
 	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
@@ -1549,15 +1541,18 @@
 	/*
 	 * Get the page directory entry
 	 */
-	ptepa = PT_GET(&pmap->pm_pdir[ptepindex]);
+	ptema = pmap->pm_pdir[ptepindex];
 
 	/*
 	 * This supports switching from a 4MB page to a
 	 * normal 4K page.
 	 */
-	if (ptepa & PG_PS) {
+	if (ptema & PG_PS) {
+		/*
+		 * XXX 
+		 */
 		pmap->pm_pdir[ptepindex] = 0;
-		ptepa = 0;
+		ptema = 0;
 		pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 		pmap_invalidate_all(kernel_pmap);
 	}
@@ -1566,8 +1561,8 @@
 	 * If the page table page is mapped, we just increment the
 	 * hold count, and activate it.
 	 */
-	if (ptepa) {
-		m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
+	if (ptema) {
+		m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
 		m->wire_count++;
 	} else {
 		/*
@@ -1603,7 +1598,7 @@
 	u_int mymask = PCPU_GET(cpumask);
 
 #ifdef COUNT_IPIS
-	*ipi_lazypmap_counts[PCPU_GET(cpuid)]++;
+	(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 #endif
 	if (rcr3() == lazyptd)
 		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
@@ -1705,7 +1700,7 @@
 
 	ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir));
 	for (i = 0; i < nkpt + NPGPTD; i++) 
-		PD_CLEAR_VA(pmap, &pmap->pm_pdir[PTDPTDI + i], FALSE);
+		PD_CLEAR_VA(pmap, PTDPTDI + i, FALSE);
 
 	
 	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
@@ -1719,7 +1714,7 @@
 		/* unpinning L1 and L2 treated the same */
                 xen_pgd_unpin(ma);
 #ifdef PAE
-		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
+		KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME),
 		    ("pmap_release: got wrong ptd page"));
 #endif
 		m->wire_count--;
@@ -1800,12 +1795,18 @@
 		pmap_zero_page(nkpg);
 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
+#ifdef notyet		
 		PD_SET_VA(kernel_pmap, &pdir_pde(kernel_pmap->pm_pdir, kernel_vm_end), newpdir, TRUE);
-
+#else
+		panic("implement me");
+#endif		
+		
 		mtx_lock_spin(&allpmaps_lock);
 		LIST_FOREACH(pmap, &allpmaps, pm_list) {
 			pde = pmap_pde(pmap, kernel_vm_end);
+#ifdef notyet			
 			PD_SET_VA(pmap, pde, newpdir, FALSE);
+#endif			
 		}
 		PT_UPDATES_FLUSH();
 		mtx_unlock_spin(&allpmaps_lock);
@@ -1982,6 +1983,7 @@
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
 	static vm_pindex_t colour;
+	struct vpgqueues *pq;
 	int bit, field;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
@@ -1996,6 +1998,8 @@
 			printf("Approaching the limit on PV entries, consider "
 			    "increasing either the vm.pmap.shpgperproc or the "
 			    "vm.pmap.pv_entry_max tunable.\n");
+	pq = NULL;
+retry:
 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 	if (pc != NULL) {
 		for (field = 0; field < _NPCM; field++) {
@@ -2019,21 +2023,17 @@
 			return (pv);
 		}
 	}
-	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
-	m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
-	if (m == NULL || pc == NULL) {
+	/*
+	 * Access to the ptelist "pv_vafree" is synchronized by the page
+	 * queues lock.  If "pv_vafree" is currently non-empty, it will
+	 * remain non-empty until pmap_ptelist_alloc() completes.
+	 */
+	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
+	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 		if (try) {
 			pv_entry_count--;
 			PV_STAT(pc_chunk_tryfail++);
-			if (m) {
-				vm_page_lock_queues();
-				vm_page_unwire(m, 0);
-				vm_page_free(m);
-				vm_page_unlock_queues();
-			}
-			if (pc)
-				pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 			return (NULL);
 		}
 		/*
@@ -2041,30 +2041,21 @@
 		 * inactive pages.  After that, if a pv chunk entry
 		 * is still needed, destroy mappings to active pages.
 		 */
-		PV_STAT(pmap_collect_inactive++);
-		pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
-		if (m == NULL)
-			m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
-			    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
-		if (pc == NULL)
-			pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
-		if (m == NULL || pc == NULL) {
+		if (pq == NULL) {
+			PV_STAT(pmap_collect_inactive++);
+			pq = &vm_page_queues[PQ_INACTIVE];
+		} else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 			PV_STAT(pmap_collect_active++);
-			pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
-			if (m == NULL)
-				m = vm_page_alloc(NULL, colour,
-				    VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
-				    VM_ALLOC_WIRED);
-			if (pc == NULL)
-				pc = (struct pv_chunk *)
-				    pmap_ptelist_alloc(&pv_vafree);
-			if (m == NULL || pc == NULL)
-				panic("get_pv_entry: increase vm.pmap.shpgperproc");
-		}
+			pq = &vm_page_queues[PQ_ACTIVE];
+		} else
+			panic("get_pv_entry: increase vm.pmap.shpgperproc");
+		pmap_collect(pmap, pq);
+		goto retry;
 	}
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
 	colour++;
+	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 	pmap_qenter((vm_offset_t)pc, &m, 1);
 	pc->pc_pmap = pmap;
 	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
@@ -2251,7 +2242,7 @@
 		 * Check for large page.
 		 */
 		if ((ptpaddr & PG_PS) != 0) {
-			PD_CLEAR_VA(pmap, &pmap->pm_pdir[pdirindex], TRUE);
+			PD_CLEAR_VA(pmap, pdirindex, TRUE);
 			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 			anyvalid = 1;
 			continue;
@@ -2438,7 +2429,7 @@
 			 * size, PG_RW, PG_A, and PG_M are among the least
 			 * significant 32 bits.
 			 */
-			obits = pbits = *pte;
+			obits = pbits = PT_GET(pte);
 			if ((pbits & PG_V) == 0)
 				continue;
 			if (pbits & PG_MANAGED) {
@@ -2548,12 +2539,10 @@
 		}
 	}
 #endif
-
 	pde = pmap_pde(pmap, va);
 	if ((*pde & PG_PS) != 0)
 		panic("pmap_enter: attempted pmap_enter on 4MB page");
 	pte = pmap_pte_quick(pmap, va);
-
 	/*
 	 * Page Directory table entry not valid, we need a new PT page
 	 */
@@ -2564,9 +2553,13 @@
 
 	pa = VM_PAGE_TO_PHYS(m);
 	om = NULL;
-	origpte = PT_GET(pte);
-	opa = origpte & PG_FRAME;
-
+	opa = origpte = 0;
+	
+	if (*pte & PG_V) {
+		origpte = PT_GET(pte);
+		opa = origpte & PG_FRAME;
+	}
+	
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
 	 */
@@ -2766,7 +2759,7 @@
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
 		unsigned ptepindex;
-		pd_entry_t ptepa;
+		pd_entry_t ptema;
 
 		/*
 		 * Calculate pagetable page index
@@ -2778,16 +2771,16 @@
 			/*
 			 * Get the page directory entry
 			 */
-			ptepa = pmap->pm_pdir[ptepindex];
+			ptema = pmap->pm_pdir[ptepindex];
 
 			/*
 			 * If the page table page is mapped, we just increment
 			 * the hold count, and activate it.
 			 */
-			if (ptepa) {
-				if (ptepa & PG_PS)
+			if (ptema) {
+				if (ptema & PG_PS)
 					panic("pmap_enter_quick: unexpected mapping into 4MB page");
-				mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
+				mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
 				mpte->wire_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
@@ -2807,7 +2800,7 @@
 	 * But that isn't as quick as vtopte.
 	 */
 	pte = vtopte(va);
-	if (PT_GET(pte)) {
+	if (*pte) {
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			mpte = NULL;
@@ -2929,7 +2922,7 @@
 		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 		npdes = size >> PDRSHIFT;
 		for(i = 0; i < npdes; i++) {
-			PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex],
+			PD_SET_VA(pmap, ptepindex,
 			    ptepa | PG_U | PG_RW | PG_V | PG_PS, FALSE);
 			ptepa += NBPDR;
 			ptepindex += 1;
@@ -3025,7 +3018,7 @@
 			
 		if (srcptepaddr & PG_PS) {
 			if (dst_pmap->pm_pdir[ptepindex] == 0) {
-				PD_SET_VA(dst_pmap, &dst_pmap->pm_pdir[ptepindex], srcptepaddr & ~PG_W, TRUE);
+				PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE);
 				dst_pmap->pm_stats.resident_count +=
 				    NBPDR / PAGE_SIZE;
 			}
@@ -3042,7 +3035,7 @@
 		src_pte = vtopte(addr);
 		while (addr < pdnxt) {
 			pt_entry_t ptetemp;
-			ptetemp = *src_pte;
+			ptetemp = PT_GET(src_pte);
 			/*
 			 * we only virtual copy managed pages
 			 */
@@ -3238,36 +3231,36 @@
 	return (FALSE);
 }
 
-/* 
- *      pmap_page_wired_mappings: 
- * 
- *      Return the number of managed mappings to the given physical page 
- *      that are wired. 
- */ 
-int 
-pmap_page_wired_mappings(vm_page_t m) 
-{ 
-        pv_entry_t pv; 
-        pt_entry_t *pte; 
-        pmap_t pmap; 
-        int count; 
- 
-        count = 0; 
-        if ((m->flags & PG_FICTITIOUS) != 0) 
-                return (count); 
-        mtx_assert(&vm_page_queue_mtx, MA_OWNED); 
-        sched_pin(); 
-        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 
-                pmap = PV_PMAP(pv); 
-                PMAP_LOCK(pmap); 
-                pte = pmap_pte_quick(pmap, pv->pv_va); 
-                if ((*pte & PG_W) != 0) 
-                        count++; 
-                PMAP_UNLOCK(pmap); 
-        } 
-        sched_unpin(); 
-        return (count); 
-} 
+/*
+ *	pmap_page_wired_mappings:
+ *
+ *	Return the number of managed mappings to the given physical page
+ *	that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+	pv_entry_t pv;
+	pt_entry_t *pte;
+	pmap_t pmap;
+	int count;
+
+	count = 0;
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		return (count);
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	sched_pin();
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		pte = pmap_pte_quick(pmap, pv->pv_va);
+		if ((*pte & PG_W) != 0)
+			count++;
+		PMAP_UNLOCK(pmap);
+	}
+	sched_unpin();
+	return (count);
+}
 
 /*
  * Remove all pages from specified address space
@@ -3826,7 +3819,7 @@
 	
 	PMAP_LOCK(pmap);
 	ptep = pmap_pte(pmap, addr);
-	pte = (ptep != NULL) ? *ptep : 0;
+	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
 	pmap_pte_release(ptep);
 	PMAP_UNLOCK(pmap);
 	if (PMAP2_inuse) {
@@ -3960,7 +3953,7 @@
 						if (pte && pmap_pte_v(pte)) {
 							pt_entry_t pa;
 							vm_page_t m;
-							pa = *pte;
+							pa = PT_GET(pte);
 							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 								va, pa, m->hold_count, m->wire_count, m->flags);
@@ -3986,7 +3979,7 @@
 #if defined(DEBUG)
 
 static void	pads(pmap_t pm);
-void		pmap_pvdump(vm_offset_t pa);
+void		pmap_pvdump(vm_paddr_t pa);
 
 /* print address space of pmap*/
 static void
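
The get_pv_entry() hunk above also replaces the duplicated inactive/active
reclamation code with a retry loop.  A condensed sketch of the new control
flow (illustrative, not a verbatim excerpt):

	pq = NULL;
	retry:
	/* first try to take a free slot from an existing pv chunk ... */
	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
		if (try)
			return (NULL);
		/* reclaim from the inactive queue first, then the active
		 * queue; if both have been tried, give up */
		if (pq == NULL)
			pq = &vm_page_queues[PQ_INACTIVE];
		else if (pq == &vm_page_queues[PQ_INACTIVE])
			pq = &vm_page_queues[PQ_ACTIVE];
		else
			panic("get_pv_entry: increase vm.pmap.shpgperproc");
		pmap_collect(pmap, pq);
		goto retry;
	}
	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
	pmap_qenter((vm_offset_t)pc, &m, 1);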


