Date:      Sat, 21 Jan 2012 18:38:57 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r230433 - stable/8/sys/i386/xen
Message-ID:  <201201211838.q0LIcvMG004607@svn.freebsd.org>

Author: alc
Date: Sat Jan 21 18:38:57 2012
New Revision: 230433
URL: http://svn.freebsd.org/changeset/base/230433

Log:
  MFC r228746
    The Xen pmap doesn't support superpages, so there is no point in its
    initializing structures, like the pv table, that are only used to
    implement superpages.  In fact, some of the unnecessary code in
    pmap_init() was actually doing harm: it was preventing the kernel
    from booting on virtual machines with more than 768 MB of memory.
  
  Note: The change to pmap_page_is_mapped() differs slightly from r228746
    because of differences in how the page queues lock is used in
    FreeBSD 8.x.
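
For context, the pv head table sizing that this change removes scaled with
the highest physical address.  A minimal sketch, reassembled from the
removed lines in the diff below (all identifiers are the kernel's own;
this is illustration of the deleted pmap_init() fragment, not new code):

	vm_size_t s;
	int i, pv_npg;

	/*
	 * Walk phys_avail[] to its terminating zero entry to find the
	 * end of the last segment, i.e. the highest usable physical
	 * address.
	 */
	for (i = 0; phys_avail[i + 1]; i += 2)
		;
	/* One struct md_page per 4 MB superpage of physical memory. */
	pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

Since the Xen pmap never creates superpage mappings, this table was never
consulted, and per the log above it was this unneeded work in pmap_init()
that kept the kernel from booting on larger virtual machines.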

Modified:
  stable/8/sys/i386/xen/pmap.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)

Modified: stable/8/sys/i386/xen/pmap.c
==============================================================================
--- stable/8/sys/i386/xen/pmap.c	Sat Jan 21 18:21:44 2012	(r230432)
+++ stable/8/sys/i386/xen/pmap.c	Sat Jan 21 18:38:57 2012	(r230433)
@@ -184,9 +184,6 @@ __FBSDID("$FreeBSD$");
 #define PV_STAT(x)	do { } while (0)
 #endif
 
-#define	pa_index(pa)	((pa) >> PDRSHIFT)
-#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
-
 /*
  * Get PDEs and PTEs for user/kernel address space
  */
@@ -233,7 +230,6 @@ static int pat_works;			/* Is page attri
  * Data for the pv entry allocation mechanism
  */
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
-static struct md_page *pv_table;
 static int shpgperproc = PMAP_SHPGPERPROC;
 
 struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
@@ -282,9 +278,6 @@ SYSCTL_INT(_debug, OID_AUTO, PMAP1unchan
 static struct mtx PMAP2mutex;
 
 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
-static int pg_ps_enabled;
-SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
-    "Are large page mappings enabled?");
 
 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
 	"Max number of PV entries");
@@ -687,24 +680,8 @@ pmap_ptelist_init(vm_offset_t *head, voi
 void
 pmap_init(void)
 {
-	vm_page_t mpte;
-	vm_size_t s;
-	int i, pv_npg;
 
 	/*
-	 * Initialize the vm page array entries for the kernel pmap's
-	 * page table pages.
-	 */ 
-	for (i = 0; i < nkpt; i++) {
-		mpte = PHYS_TO_VM_PAGE(xpmap_mtop(PTD[i + KPTDI] & PG_FRAME));
-		KASSERT(mpte >= vm_page_array &&
-		    mpte < &vm_page_array[vm_page_array_size],
-		    ("pmap_init: page table page is out of range"));
-		mpte->pindex = i + KPTDI;
-		mpte->phys_addr = xpmap_mtop(PTD[i + KPTDI] & PG_FRAME);
-	}
-
-        /*
 	 * Initialize the address space (zone) for the pv entries.  Set a
 	 * high water mark so that the system can recover from excessive
 	 * numbers of pv entries.
@@ -715,26 +692,6 @@ pmap_init(void)
 	pv_entry_max = roundup(pv_entry_max, _NPCPV);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
-	/*
-	 * Are large page mappings enabled?
-	 */
-	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
-
-	/*
-	 * Calculate the size of the pv head table for superpages.
-	 */
-	for (i = 0; phys_avail[i + 1]; i += 2);
-	pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
-
-	/*
-	 * Allocate memory for the pv head table for superpages.
-	 */
-	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
-	s = round_page(s);
-	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
-	for (i = 0; i < pv_npg; i++)
-		TAILQ_INIT(&pv_table[i].pv_list);
-
 	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
 	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
 	    PAGE_SIZE * pv_maxchunks);
@@ -3537,22 +3494,16 @@ pmap_page_wired_mappings(vm_page_t m)
 }
 
 /*
- * Returns TRUE if the given page is mapped individually or as part of
- * a 4mpage.  Otherwise, returns FALSE.
+ * Returns TRUE if the given page is mapped.  Otherwise, returns FALSE.
  */
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	return (!TAILQ_EMPTY(&m->md.pv_list));
 }
 
 /*

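On the locking note in the log: the MFC differs from r228746 in how the
page queues lock is used, and in 8.x the mtx_assert() in the diff shows
that pmap_page_is_mapped() requires the caller to already hold the global
vm_page_queue_mtx.  A hypothetical caller, shown only to illustrate that
locking contract (m stands for an arbitrary managed vm_page_t):

	/* Assumes m is a vm_page_t the caller has a reference to. */
	vm_page_lock_queues();
	if (pmap_page_is_mapped(m)) {
		/* The page still has at least one managed mapping. */
	}
	vm_page_unlock_queues();

With the superpage pv lists gone, the function only has to check the
page's own pv_list, which is exactly what the simplified body does.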