From: Mark Johnston <markj@FreeBSD.org>
Date: Mon, 23 Oct 2017 15:34:06 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r324920 - in head/sys: amd64/amd64 arm/arm arm64/arm64 i386/i386

Author: markj
Date: Mon Oct 23 15:34:05 2017
New Revision: 324920
URL: https://svnweb.freebsd.org/changeset/base/324920

Log:
  Fix the VM_NRESERVLEVEL == 0 build.

  Add VM_NRESERVLEVEL guards in the pmaps that implement transparent
  superpage promotion using reservations.
  Reviewed by:	alc, kib
  MFC after:	1 week
  Differential Revision:	https://reviews.freebsd.org/D12764

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm64/arm64/pmap.c
  head/sys/i386/i386/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Mon Oct 23 11:12:09 2017	(r324919)
+++ head/sys/amd64/amd64/pmap.c	Mon Oct 23 15:34:05 2017	(r324920)
@@ -604,8 +604,10 @@ static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_
 		    struct rwlock **lockp);
 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
 		    u_int flags, struct rwlock **lockp);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 		    struct rwlock **lockp);
+#endif
 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 		    vm_offset_t va);
@@ -628,8 +630,10 @@ static void pmap_invalidate_pde_page(pmap_t pmap, vm_o
 		    pd_entry_t pde);
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 		    struct rwlock **lockp);
+#endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde,
 		    vm_offset_t sva, vm_prot_t prot);
 static void	pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
@@ -3359,6 +3363,7 @@ out:
 	PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
  * replace the many pv entries for the 4KB page mappings by a single pv entry
@@ -3399,6 +3404,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_pa
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * First find and then destroy the pv entry for the specified pmap and virtual
@@ -4243,6 +4249,7 @@ retry:
 	PMAP_UNLOCK(pmap);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * Tries to promote the 512, contiguous 4KB page mappings that are within a
  * single page table page (PTP) to a single 2MB page mapping.  For promotion
@@ -4371,6 +4378,7 @@ setpte:
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 	    " in pmap %p", va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -4599,6 +4607,7 @@ validate:
 
 unchanged:
 
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the page table page and the reservation are fully
 	 * populated, then attempt promotion.
@@ -4608,6 +4617,7 @@ unchanged:
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va, &lock);
+#endif
 
 	rv = KERN_SUCCESS;
 out:
@@ -7171,7 +7181,9 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t v
 {
 	int rv;
 	struct rwlock *lock;
+#if VM_NRESERVLEVEL > 0
 	vm_page_t m, mpte;
+#endif
 	pd_entry_t *pde;
 	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
 
@@ -7226,6 +7238,7 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t v
 		*pte |= PG_A;
 	}
 
+#if VM_NRESERVLEVEL > 0
 	/* try to promote the mapping */
 	if (va < VM_MAXUSER_ADDRESS)
 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
@@ -7243,6 +7256,8 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t v
 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
 #endif
 	}
+#endif
+
 #ifdef INVARIANTS
 	if (ftype == VM_PROT_WRITE)
 		atomic_add_long(&num_dirty_emulations, 1);

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Mon Oct 23 11:12:09 2017	(r324919)
+++ head/sys/arm/arm/pmap-v6.c	Mon Oct 23 15:34:05 2017	(r324920)
@@ -3165,6 +3165,7 @@ pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_pa
 	} while (va < va_last);
 }
 
+#if VM_NRESERVLEVEL > 0
 static void
 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 {
@@ -3198,6 +3199,7 @@ pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_p
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif
 
 /*
  * Conditionally create a pv entry.
@@ -3405,6 +3407,7 @@ pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_o
 }
 #endif
 
+#if VM_NRESERVLEVEL > 0
 /*
  * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are
  * within a single page table page (PT2) to a single 1MB page mapping.
@@ -3532,6 +3535,7 @@ pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_
 	PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
 	    __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Zero L2 page table page.
@@ -4053,6 +4057,8 @@ validate:
 		    va, opte2, npte2);
 	}
 #endif
+
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the L2 page table page and the reservation are fully
 	 * populated, then attempt promotion.
@@ -4061,6 +4067,7 @@ validate:
 	    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pte1(pmap, pte1p, va);
+#endif
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Mon Oct 23 11:12:09 2017	(r324919)
+++ head/sys/arm64/arm64/pmap.c	Mon Oct 23 15:34:05 2017	(r324920)
@@ -105,6 +105,8 @@ __FBSDID("$FreeBSD$");
  * and to when physical maps must be made correct.
 */
 
+#include "opt_vm.h"
+
 #include <...>
 #include <...>
 #include <...>
@@ -2677,6 +2679,7 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_ent
 	intr_restore(intr);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
  * replace the many pv entries for the 4KB page mappings by a single pv entry
@@ -2790,6 +2793,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset
 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p",
 	    va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -3045,12 +3049,14 @@ validate:
 		    (prot & VM_PROT_EXECUTE) != 0)
 			cpu_icache_sync_range(va, PAGE_SIZE);
 
+#if VM_NRESERVLEVEL > 0
 		if ((mpte == NULL || mpte->wire_count == NL3PG) &&
 		    pmap_superpages_enabled() &&
 		    (m->flags & PG_FICTITIOUS) == 0 &&
 		    vm_reserv_level_iffullpop(m) == 0) {
 			pmap_promote_l2(pmap, pde, va, &lock);
 		}
+#endif
 	}
 
 	if (lock != NULL)

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Mon Oct 23 11:12:09 2017	(r324919)
+++ head/sys/i386/i386/pmap.c	Mon Oct 23 15:34:05 2017	(r324920)
@@ -100,6 +100,7 @@ __FBSDID("$FreeBSD$");
 #include "opt_cpu.h"
 #include "opt_pmap.h"
 #include "opt_smp.h"
+#include "opt_vm.h"
 #include "opt_xbox.h"
 
 #include <...>
@@ -288,7 +289,9 @@ static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#endif
 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 		    vm_offset_t va);
@@ -309,7 +312,9 @@ static boolean_t pmap_is_referenced_pvh(struct md_page
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void	pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
+#endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde,
 		    vm_offset_t sva, vm_prot_t prot);
 static void	pmap_pte_attr(pt_entry_t *pte, int cache_bits);
@@ -2504,6 +2509,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_pad
 	} while (va < va_last);
 }
 
+#if VM_NRESERVLEVEL > 0
 static void
 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 {
@@ -2537,6 +2543,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_pa
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 static void
 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
@@ -3312,6 +3319,7 @@ retry:
 	PMAP_UNLOCK(pmap);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
@@ -3448,6 +3456,7 @@ setpte:
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 	    " in pmap %p", va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -3664,6 +3673,7 @@ validate:
 			pte_store(pte, newpte);
 	}
 
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the page table page and the reservation are fully
	 * populated, then attempt promotion.
@@ -3672,6 +3682,7 @@ validate:
 	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
+#endif
 
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
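
For readers unfamiliar with the option plumbing: VM_NRESERVLEVEL comes from
opt_vm.h (which is why the arm64 and i386 files gain that include), and the
fix throughout is plain conditional compilation, so each promotion helper and
all of its call sites are compiled out together when the kernel is built
without superpage reservations, leaving no unused statics for -Werror builds
to trip over. Below is a minimal standalone sketch of the pattern; the
example_* names are hypothetical stand-ins, not the kernel's pmap functions.

/*
 * Sketch of the VM_NRESERVLEVEL guard pattern.  The fallback definition
 * here is a stand-in for what opt_vm.h provides in a kernel build.
 */
#ifndef VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL	0
#endif

#if VM_NRESERVLEVEL > 0
static void	example_promote(void);	/* exists only with reservations */
#endif

void
example_enter_mapping(void)
{
	/* ... establish the small-page mapping ... */
#if VM_NRESERVLEVEL > 0
	/* Attempt superpage promotion only when reservations exist. */
	example_promote();
#endif
}

#if VM_NRESERVLEVEL > 0
static void
example_promote(void)
{
	/* ... promotion work would go here ... */
}
#endif

With VM_NRESERVLEVEL == 0, both the declaration and the definition of
example_promote() disappear along with the call, which is exactly how the
guards above keep the reservation-free build warning-clean.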