Date:      Wed, 31 Mar 2021 01:17:37 GMT
From:      "Jason A. Harmening" <jah@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 8dc8feb53da0 - main - Clean up a couple of MD warts in vm_fault_populate():
Message-ID:  <202103310117.12V1HbJH042232@gitrepo.freebsd.org>

The branch main has been updated by jah:

URL: https://cgit.FreeBSD.org/src/commit/?id=8dc8feb53da0c1a2301cb21c87b17a09d12e8fa7

commit 8dc8feb53da0c1a2301cb21c87b17a09d12e8fa7
Author:     Jason A. Harmening <jah@FreeBSD.org>
AuthorDate: 2021-03-27 03:10:46 +0000
Commit:     Jason A. Harmening <jah@FreeBSD.org>
CommitDate: 2021-03-31 01:15:55 +0000

    Clean up a couple of MD warts in vm_fault_populate():
    
    --Eliminate a big ifdef that encompassed all currently supported
    architectures except mips and powerpc32.  This applied to the case
    in which we've allocated a superpage but the pager-populated range
    is insufficient for a superpage mapping.  For platforms that don't
    support superpages, the check should be inexpensive, as we shouldn't
    get a superpage in the first place.  Make the normal-page fallback
    logic identical for all platforms and provide a simple implementation
    of pmap_ps_enabled() for MIPS and Book-E/AIM32 powerpc.
    
    --Apply the logic for handling pmap_enter() failure when a superpage
    mapping can't be supported due to an additional protection policy.
    Use KERN_PROTECTION_FAILURE instead of KERN_FAILURE for this case,
    and note Intel PKU on amd64 as the first example of such a policy.
    
    Reviewed by:    kib, markj, bdragon
    Differential Revision:  https://reviews.freebsd.org/D29439
---
 sys/amd64/amd64/pmap.c    |  2 +-
 sys/mips/include/pmap.h   |  6 ++++++
 sys/powerpc/aim/mmu_oea.c |  8 ++++++++
 sys/powerpc/booke/pmap.c  |  8 ++++++++
 sys/vm/vm_fault.c         | 26 +++++++++++++++-----------
 5 files changed, 38 insertions(+), 12 deletions(-)
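
For reference, the machine-independent fallback described in the second
item above has roughly the following shape.  This is a simplified sketch
of the sys/vm/vm_fault.c change, not the committed code (the actual hunk
follows below); object locking, the assertion on other return values,
and the surrounding loop bookkeeping are elided.

	rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
	    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);

	/*
	 * A superpage mapping may be refused for policy reasons (e.g.
	 * Intel PKU on amd64), which the pmap signals with
	 * KERN_PROTECTION_FAILURE.  Fall back to mapping the same run
	 * with base pages.
	 */
	if (psind > 0 && rv == KERN_PROTECTION_FAILURE) {
		for (i = 0; i < npages; i++) {
			rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
			    &m[i], fs->prot,
			    fs->fault_type |
			    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
			MPASS(rv == KERN_SUCCESS);
		}
	}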

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fc5f24d2c303..aa8810e05b63 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -7147,7 +7147,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
 	 */
 	if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
 		pmap_abort_ptp(pmap, va, pdpg);
-		return (KERN_FAILURE);
+		return (KERN_PROTECTION_FAILURE);
 	}
 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
 		newpde &= ~X86_PG_PKU_MASK;
diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
index 77f75903ccd5..6678f8264ad7 100644
--- a/sys/mips/include/pmap.h
+++ b/sys/mips/include/pmap.h
@@ -190,6 +190,12 @@ pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
 	return (0);
 }
 
+static inline bool
+pmap_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 #endif				/* _KERNEL */
 
 #endif				/* !LOCORE */
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 44d8b9518b8e..cb0c905a53f3 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -327,6 +327,7 @@ void moea_scan_init(void);
 vm_offset_t moea_quick_enter_page(vm_page_t m);
 void moea_quick_remove_page(vm_offset_t addr);
 boolean_t moea_page_is_mapped(vm_page_t m);
+bool moea_ps_enabled(pmap_t pmap);
 static int moea_map_user_ptr(pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
 static int moea_decode_kernel_ptr(vm_offset_t addr,
@@ -370,6 +371,7 @@ static struct pmap_funcs moea_methods = {
 	.quick_enter_page =  moea_quick_enter_page,
 	.quick_remove_page =  moea_quick_remove_page,
 	.page_is_mapped = moea_page_is_mapped,
+	.ps_enabled = moea_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap =        	moea_bootstrap,
@@ -1122,6 +1124,12 @@ moea_page_is_mapped(vm_page_t m)
 	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
 
+bool
+moea_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 6bc96b222db8..6c48584096c5 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -354,6 +354,7 @@ static int		mmu_booke_decode_kernel_ptr(vm_offset_t addr,
     int *is_user, vm_offset_t *decoded_addr);
 static void		mmu_booke_page_array_startup(long);
 static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
+static bool mmu_booke_ps_enabled(pmap_t pmap);
 
 static struct pmap_funcs mmu_booke_methods = {
 	/* pmap dispatcher interface */
@@ -396,6 +397,7 @@ static struct pmap_funcs mmu_booke_methods = {
 	.quick_remove_page =  mmu_booke_quick_remove_page,
 	.page_array_startup = mmu_booke_page_array_startup,
 	.page_is_mapped = mmu_booke_page_is_mapped,
+	.ps_enabled = mmu_booke_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap = mmu_booke_bootstrap,
@@ -1226,6 +1228,12 @@ mmu_booke_page_is_mapped(vm_page_t m)
 	return (!TAILQ_EMPTY(&(m)->md.pv_list));
 }
 
+static bool
+mmu_booke_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 585e1544415d..dd112feefdcd 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -542,17 +542,13 @@ vm_fault_populate(struct faultstate *fs)
 	    pidx <= pager_last;
 	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
 		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
-#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \
-    defined(__powerpc64__)
+
 		psind = m->psind;
 		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
 		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
 		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
 			psind = 0;
-#else
-		psind = 0;
-#endif		
+
 		npages = atop(pagesizes[psind]);
 		for (i = 0; i < npages; i++) {
 			vm_fault_populate_check_page(&m[i]);
@@ -561,8 +557,18 @@ vm_fault_populate(struct faultstate *fs)
 		VM_OBJECT_WUNLOCK(fs->first_object);
 		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
 		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
-#if defined(__amd64__)
-		if (psind > 0 && rv == KERN_FAILURE) {
+
+		/*
+		 * pmap_enter() may fail for a superpage mapping if additional
+		 * protection policies prevent the full mapping.
+		 * For example, this will happen on amd64 if the entire
+		 * address range does not share the same userspace protection
+		 * key.  Revert to single-page mappings if this happens.
+		 */
+		MPASS(rv == KERN_SUCCESS ||
+		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
+		if (__predict_false(psind > 0 &&
+		    rv == KERN_PROTECTION_FAILURE)) {
 			for (i = 0; i < npages; i++) {
 				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
 				    &m[i], fs->prot, fs->fault_type |
@@ -570,9 +576,7 @@ vm_fault_populate(struct faultstate *fs)
 				MPASS(rv == KERN_SUCCESS);
 			}
 		}
-#else
-		MPASS(rv == KERN_SUCCESS);
-#endif
+
 		VM_OBJECT_WLOCK(fs->first_object);
 		for (i = 0; i < npages; i++) {
 			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)


