Date:      Fri, 27 Jul 2018 15:46:34 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r336764 - in head/sys: amd64/amd64 i386/i386 vm
Message-ID:  <201807271546.w6RFkYoA031410@repo.freebsd.org>

Author: markj
Date: Fri Jul 27 15:46:34 2018
New Revision: 336764
URL: https://svnweb.freebsd.org/changeset/base/336764

Log:
  Fix handling of KVA in kmem_bootstrap_free().
  
  Do not use vm_map_remove() to release KVA back to the system.  Because
  kernel map entries do not have an associated VM object, with r336030
  the vm_map_remove() call will not update the kernel page tables.  Avoid
  relying on the vm_map layer and instead update the pmap and release KVA
  to the kernel arena directly in kmem_bootstrap_free().
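  
  In outline, the tail of kmem_bootstrap_free() now frees the backing
  pages to the physical allocator, then tears down the mappings and hands
  the range to the arena.  The following is a simplified, annotated
  sketch of the vm_kern.c hunk below (declarations and the page rounding
  of start/end omitted):
  
  	for (va = start; va < end; va += PAGE_SIZE) {
  		/* Look up the page backing this bootstrap mapping. */
  		pa = pmap_kextract(va);
  		m = PHYS_TO_VM_PAGE(pa);
  		/* Free it directly to the physical allocator. */
  		vmd = vm_pagequeue_domain(m);
  		vm_domain_free_lock(vmd);
  		vm_phys_free_pages(m, 0);
  		vm_domain_free_unlock(vmd);
  	}
  	/* Unmap the range in the pmap, bypassing the vm_map layer. */
  	pmap_remove(kernel_pmap, start, end);
  	/* Make the KVA reusable by adding it to the kernel arena. */
  	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);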
  
  Because the pmap updates will generally result in superpage demotions,
  modify pmap_init() to insert PTPs shadowed by superpage mappings into
  the kernel pmap's radix tree.
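  
  A minimal sketch of the corresponding pmap_init() loop on amd64, with
  explanatory comments added; see the pmap.c hunk below for the
  authoritative change (the i386 version additionally checks pseflag and
  the KERNBASE lower bound, and the KASSERT is omitted here):
  
  	PMAP_LOCK(kernel_pmap);
  	for (i = 0; i < nkpt; i++) {
  		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
  		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
  		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
  		mpte->wire_count = 1;
  		/*
  		 * Record PTPs shadowed by the bootstrap 2MB mappings in
  		 * the kernel pmap's radix tree, so that a later demotion
  		 * of one of those mappings can find its page table page.
  		 */
  		if (i << PDRSHIFT < KERNend &&
  		    pmap_insert_pt_page(kernel_pmap, mpte))
  			panic("pmap_init: pmap_insert_pt_page failed");
  	}
  	PMAP_UNLOCK(kernel_pmap);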
  
  While here, port r329171 to i386.
  
  Reported by:	alc
  Reviewed by:	alc, kib
  X-MFC with:	r336505
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D16426

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/vm/vm_kern.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Fri Jul 27 15:31:20 2018	(r336763)
+++ head/sys/amd64/amd64/pmap.c	Fri Jul 27 15:46:34 2018	(r336764)
@@ -372,6 +372,8 @@ static u_int64_t	DMPDphys;	/* phys addr of direct mapp
 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
 static int		ndmpdpphys;	/* number of DMPDPphys pages */
 
+static vm_paddr_t	KERNend;	/* phys addr of end of bootstrap data */
+
 /*
  * pmap_mapdev support pre initialization (i.e. console)
  */
@@ -998,8 +1000,9 @@ create_pagetables(vm_paddr_t *firstaddr)
 	/* Map from zero to end of allocations under 2M pages */
 	/* This replaces some of the KPTphys entries above */
 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
+		/* Preset PG_M and PG_A because demotion expects it. */
 		pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
-		    bootaddr_rwx(i << PDRSHIFT);
+		    X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);
 
 	/*
 	 * Because we map the physical blocks in 2M pages, adjust firstaddr
@@ -1091,6 +1094,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	pt_entry_t *pte;
 	int i;
 
+	KERNend = *firstaddr;
+
 	if (!pti)
 		pg_g = X86_PG_G;
 
@@ -1323,6 +1328,7 @@ pmap_init(void)
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */ 
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < nkpt; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
 		KASSERT(mpte >= vm_page_array &&
@@ -1331,7 +1337,11 @@ pmap_init(void)
 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
 		mpte->wire_count = 1;
+		if (i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
 	vm_wire_add(nkpt);
 
 	/*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Fri Jul 27 15:31:20 2018	(r336763)
+++ head/sys/i386/i386/pmap.c	Fri Jul 27 15:46:34 2018	(r336764)
@@ -931,6 +931,7 @@ pmap_init(void)
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */ 
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < NKPT; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
 		KASSERT(mpte >= vm_page_array &&
@@ -938,7 +939,14 @@ pmap_init(void)
 		    ("pmap_init: page table page is out of range"));
 		mpte->pindex = i + KPTDI;
 		mpte->phys_addr = KPTphys + ptoa(i);
+		mpte->wire_count = 1;
+		if (pseflag != 0 &&
+		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
+	vm_wire_add(NKPT);
 
 	/*
 	 * Initialize the address space (zone) for the pv entries.  Set a

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c	Fri Jul 27 15:31:20 2018	(r336763)
+++ head/sys/vm/vm_kern.c	Fri Jul 27 15:46:34 2018	(r336764)
@@ -700,16 +700,15 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 {
 #if defined(__i386__) || defined(__amd64__)
 	struct vm_domain *vmd;
-	vm_offset_t end;
+	vm_offset_t end, va;
 	vm_paddr_t pa;
 	vm_page_t m;
 
 	end = trunc_page(start + size);
 	start = round_page(start);
 
-	(void)vm_map_remove(kernel_map, start, end);
-	for (; start < end; start += PAGE_SIZE) {
-		pa = pmap_kextract(start);
+	for (va = start; va < end; va += PAGE_SIZE) {
+		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);
 
 		vmd = vm_pagequeue_domain(m);
@@ -717,6 +716,8 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 		vm_phys_free_pages(m, 0);
 		vm_domain_free_unlock(vmd);
 	}
+	pmap_remove(kernel_pmap, start, end);
+	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
 #endif
 }
 


