From owner-dev-commits-src-branches@freebsd.org Wed Sep  1 13:31:09 2021
Date: Wed, 1 Sep 2021 13:31:09 GMT
Message-Id: <202109011331.181DV9Wa057082@gitrepo.freebsd.org>
From: Mark Johnston
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject: git: bfff99f7d15c - stable/13 - factor out PT page allocation/freeing
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
X-Git-Committer: markj
X-Git-Repository: src
X-Git-Refname: refs/heads/stable/13
X-Git-Reftype: branch
X-Git-Commit: bfff99f7d15ca4e4b426d31bbf400cbd4b404374
Auto-Submitted: auto-generated
List-Id: Commits to the stable branches of the FreeBSD src repository

The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=bfff99f7d15ca4e4b426d31bbf400cbd4b404374

commit bfff99f7d15ca4e4b426d31bbf400cbd4b404374
Author:     Jason A. Harmening
AuthorDate: 2021-03-01 16:42:05 +0000
Commit:     Mark Johnston
CommitDate: 2021-09-01 13:29:01 +0000

factor out PT page allocation/freeing

As follow-on work to e4b8deb222278b2a, move page table page allocation
and freeing into their own functions. Use these functions to provide
separate kernel vs. user page table page accounting, and to wrap common
tasks such as management of zero-filled page state.
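To illustrate the bookkeeping pattern the message describes, here is a minimal,
self-contained sketch in plain C. It is not the kernel code from the diff below:
pt_page_count_adj, pt_page_alloc, pt_page_free, struct fake_pmap, and the
malloc-backed "page" are hypothetical stand-ins for the real counter(9),
pmap, and vm_page machinery; the actual helpers (pmap_alloc_pt_page(),
pmap_free_pt_page(), pmap_pt_page_count_adj()) appear in the diff itself.

/*
 * Minimal model of the accounting pattern: one counter for kernel page
 * table pages, one for user page table pages, and a per-pmap resident
 * count that is adjusted only for user pmaps.  All names and types are
 * simplified stand-ins, not the kernel interfaces.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct fake_pmap {
	long resident_count;		/* pages mapped by this pmap */
};

static struct fake_pmap kernel_pmap_store;
static struct fake_pmap *const kernel_pmap = &kernel_pmap_store;

static long kernel_pt_page_count;	/* stand-in for the kernel counter */
static long user_pt_page_count;		/* stand-in for the user counter */

/* Charge or uncharge page table pages to the appropriate counter. */
static void
pt_page_count_adj(struct fake_pmap *pmap, int count)
{
	if (pmap == kernel_pmap)
		kernel_pt_page_count += count;
	else {
		if (pmap != NULL)
			pmap->resident_count += count;
		user_pt_page_count += count;
	}
}

/* Allocate a zero-filled "page table page" and do the bookkeeping. */
static void *
pt_page_alloc(struct fake_pmap *pmap)
{
	void *page;

	page = malloc(PAGE_SIZE);
	if (page == NULL)
		return (NULL);
	memset(page, 0, PAGE_SIZE);	/* the real code zeroes only when needed */
	pt_page_count_adj(pmap, 1);
	return (page);
}

/* Free a page table page and undo the bookkeeping. */
static void
pt_page_free(struct fake_pmap *pmap, void *page)
{
	free(page);
	pt_page_count_adj(pmap, -1);
}

int
main(void)
{
	struct fake_pmap user_pmap = { 0 };
	void *kpt, *upt;

	kpt = pt_page_alloc(kernel_pmap);
	upt = pt_page_alloc(&user_pmap);
	printf("kernel PT pages: %ld, user PT pages: %ld, user resident: %ld\n",
	    kernel_pt_page_count, user_pt_page_count, user_pmap.resident_count);
	pt_page_free(&user_pmap, upt);
	pt_page_free(kernel_pmap, kpt);
	return (0);
}

In the diff that follows, callers that fail partway through setup release the
page through the freeing helper rather than calling vm_page_free_zero()
directly, so the counters stay consistent with the allocations.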
Requested by: markj, kib Reviewed by: kib (cherry picked from commit c2460d7cfe9fab30459ce495f08544a237a5baa3) --- sys/amd64/amd64/pmap.c | 201 +++++++++++++++++++++++++++---------------------- 1 file changed, 109 insertions(+), 92 deletions(-) diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 3086aa0b053f..1d0b6aa0cce1 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -115,6 +115,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -762,9 +763,15 @@ static COUNTER_U64_DEFINE_EARLY(pv_page_count); SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_page_count, CTLFLAG_RD, &pv_page_count, "Current number of allocated pv pages"); -static COUNTER_U64_DEFINE_EARLY(pt_page_count); -SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pt_page_count, CTLFLAG_RD, - &pt_page_count, "Current number of allocated page table pages"); +static COUNTER_U64_DEFINE_EARLY(user_pt_page_count); +SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, user_pt_page_count, CTLFLAG_RD, + &user_pt_page_count, + "Current number of allocated page table pages for userspace"); + +static COUNTER_U64_DEFINE_EARLY(kernel_pt_page_count); +SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, kernel_pt_page_count, CTLFLAG_RD, + &kernel_pt_page_count, + "Current number of allocated page table pages for the kernel"); #ifdef PV_STATS @@ -1291,6 +1298,9 @@ static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); +static vm_page_t pmap_alloc_pt_page(pmap_t, vm_pindex_t, int); +static void pmap_free_pt_page(pmap_t, vm_page_t, bool); + /********************/ /* Inline functions */ /********************/ @@ -1457,22 +1467,26 @@ pmap_pte(pmap_t pmap, vm_offset_t va) } static __inline void -pmap_resident_count_inc(pmap_t pmap, int count) +pmap_resident_count_adj(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); + KASSERT(pmap->pm_stats.resident_count + count >= 0, + ("pmap %p resident count underflow %ld %d", pmap, + pmap->pm_stats.resident_count, count)); pmap->pm_stats.resident_count += count; } static __inline void -pmap_resident_count_dec(pmap_t pmap, int count) +pmap_pt_page_count_adj(pmap_t pmap, int count) { - - PMAP_LOCK_ASSERT(pmap, MA_OWNED); - KASSERT(pmap->pm_stats.resident_count >= count, - ("pmap %p resident count underflow %ld %d", pmap, - pmap->pm_stats.resident_count, count)); - pmap->pm_stats.resident_count -= count; + if (pmap == kernel_pmap) + counter_u64_add(kernel_pt_page_count, count); + else { + if (pmap != NULL) + pmap_resident_count_adj(pmap, count); + counter_u64_add(user_pt_page_count, count); + } } PMAP_INLINE pt_entry_t * @@ -2138,6 +2152,7 @@ pmap_bootstrap_la57(void *arg __unused) kernel_pmap->pm_cr3 = KPML5phys; kernel_pmap->pm_pmltop = v_pml5; + pmap_pt_page_count_adj(kernel_pmap, 1); } SYSINIT(la57, SI_SUB_KMEM, SI_ORDER_ANY, pmap_bootstrap_la57, NULL); @@ -4004,7 +4019,6 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) pd = pmap_pde(pmap, va); *pd = 0; } - pmap_resident_count_dec(pmap, 1); if (m->pindex < NUPDE) { /* We just released a PT, unhold the matching PD */ pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME); @@ -4019,7 +4033,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) pmap_unwire_ptp(pmap, va, pml4pg, free); } - counter_u64_add(pt_page_count, -1); + pmap_pt_page_count_adj(pmap, -1); /* * Put page on a list so that it is released after @@ -4184,6 +4198,44 @@ 
pmap_pinit_pml5_pti(vm_page_t pml5pgu) pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE); } +/* Allocate a page table page and do related bookkeeping */ +static vm_page_t +pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags) +{ + vm_page_t m; + + m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ); + if (__predict_false(m == NULL)) + return (NULL); + + pmap_pt_page_count_adj(pmap, 1); + + if ((flags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) + pmap_zero_page(m); + + return (m); +} + +static void +pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled) +{ + /* + * This function assumes the page will need to be unwired, + * even though the counterpart allocation in pmap_alloc_pt_page() + * doesn't enforce VM_ALLOC_WIRED. However, all current uses + * of pmap_free_pt_page() require unwiring. The case in which + * a PT page doesn't require unwiring because its ref_count has + * naturally reached 0 is handled through _pmap_unwire_ptp(). + */ + vm_page_unwire_noq(m); + if (zerofilled) + vm_page_free_zero(m); + else + vm_page_free(m); + + pmap_pt_page_count_adj(pmap, -1); +} + /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. @@ -4198,11 +4250,9 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) /* * allocate the page directory page */ - pmltop_pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | + pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK); - counter_u64_add(pt_page_count, 1); - pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg); pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys); @@ -4215,8 +4265,6 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) pmap->pm_pmltopu = NULL; pmap->pm_type = pm_type; - if ((pmltop_pg->flags & PG_ZERO) == 0) - pagezero(pmap->pm_pmltop); /* * Do not install the host kernel mappings in the nested page @@ -4232,9 +4280,9 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) else pmap_pinit_pml4(pmltop_pg); if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) { - pmltop_pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | - VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK); - counter_u64_add(pt_page_count, 1); + pmltop_pgu = pmap_alloc_pt_page(NULL, 0, + VM_ALLOC_WIRED | VM_ALLOC_NORMAL | + VM_ALLOC_WAITOK); pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP( VM_PAGE_TO_PHYS(pmltop_pgu)); if (pmap_is_la57(pmap)) @@ -4419,13 +4467,11 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, /* * Allocate a page table page. */ - if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | - VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) + m = pmap_alloc_pt_page(pmap, ptepindex, + VM_ALLOC_WIRED | VM_ALLOC_ZERO); + if (m == NULL) return (NULL); - if ((m->flags & PG_ZERO) == 0) - pmap_zero_page(m); - /* * Map the pagetable page into the process address space, if * it isn't already there. 
@@ -4452,8 +4498,7 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, /* Wire up a new PDPE page */ pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true); if (pml4 == NULL) { - vm_page_unwire_noq(m); - vm_page_free_zero(m); + pmap_free_pt_page(pmap, m, true); return (NULL); } KASSERT((*pml4 & PG_V) == 0, @@ -4480,8 +4525,7 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, /* Wire up a new PDE page */ pdp = pmap_allocpte_getpdp(pmap, lockp, va, true); if (pdp == NULL) { - vm_page_unwire_noq(m); - vm_page_free_zero(m); + pmap_free_pt_page(pmap, m, true); return (NULL); } KASSERT((*pdp & PG_V) == 0, @@ -4491,8 +4535,7 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, /* Wire up a new PTE page */ pdp = pmap_allocpte_getpdp(pmap, lockp, va, false); if (pdp == NULL) { - vm_page_unwire_noq(m); - vm_page_free_zero(m); + pmap_free_pt_page(pmap, m, true); return (NULL); } if ((*pdp & PG_V) == 0) { @@ -4501,8 +4544,7 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, lockp, va) == NULL) { pmap_allocpte_free_unref(pmap, va, pmap_pml4e(pmap, va)); - vm_page_unwire_noq(m); - vm_page_free_zero(m); + pmap_free_pt_page(pmap, m, true); return (NULL); } } else { @@ -4519,9 +4561,6 @@ pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp, *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } - pmap_resident_count_inc(pmap, 1); - counter_u64_add(pt_page_count, 1); - return (m); } @@ -4682,16 +4721,12 @@ pmap_release(pmap_t pmap) pmap->pm_pmltop[LMSPML4I + i] = 0; } - vm_page_unwire_noq(m); - vm_page_free_zero(m); - counter_u64_add(pt_page_count, -1); + pmap_free_pt_page(NULL, m, true); if (pmap->pm_pmltopu != NULL) { m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap-> pm_pmltopu)); - vm_page_unwire_noq(m); - vm_page_free(m); - counter_u64_add(pt_page_count, -1); + pmap_free_pt_page(NULL, m, false); } if (pmap->pm_type == PT_X86 && (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) @@ -4800,14 +4835,11 @@ pmap_growkernel(vm_offset_t addr) pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end); if ((*pdpe & X86_PG_V) == 0) { /* We need a new PDP entry */ - nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT, - VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | - VM_ALLOC_WIRED | VM_ALLOC_ZERO); + nkpg = pmap_alloc_pt_page(kernel_pmap, + kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED | + VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); - if ((nkpg->flags & PG_ZERO) == 0) - pmap_zero_page(nkpg); - counter_u64_add(pt_page_count, 1); paddr = VM_PAGE_TO_PHYS(nkpg); *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M); @@ -4823,14 +4855,11 @@ pmap_growkernel(vm_offset_t addr) continue; } - nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end), - VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | - VM_ALLOC_ZERO); + nkpg = pmap_alloc_pt_page(kernel_pmap, + pmap_pde_pindex(kernel_vm_end), VM_ALLOC_WIRED | + VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); - if ((nkpg->flags & PG_ZERO) == 0) - pmap_zero_page(nkpg); - counter_u64_add(pt_page_count, 1); paddr = VM_PAGE_TO_PHYS(nkpg); newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M; pde_store(pde, newpdir); @@ -5071,7 +5100,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain) goto next_chunk; } /* Every freed mapping is for a 4 KB page. 
*/ - pmap_resident_count_dec(pmap, freed); + pmap_resident_count_adj(pmap, -freed); PV_STAT(counter_u64_add(pv_entry_frees, freed)); PV_STAT(counter_u64_add(pv_entry_spare, freed)); PV_STAT(counter_u64_add(pv_entry_count, -freed)); @@ -5747,9 +5776,9 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, * priority (VM_ALLOC_INTERRUPT). Otherwise, the * priority is normal. */ - mpte = vm_page_alloc(NULL, pmap_pde_pindex(va), + mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va), (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | - VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); + VM_ALLOC_WIRED); /* * If the allocation of the new page table page fails, @@ -5760,12 +5789,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, return (FALSE); } - counter_u64_add(pt_page_count, 1); - - if (!in_kernel) { + if (!in_kernel) mpte->ref_count = NPTEPG; - pmap_resident_count_inc(pmap, 1); - } } mptepa = VM_PAGE_TO_PHYS(mpte); firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); @@ -5898,7 +5923,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); - pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); + pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE); if (oldpde & PG_MANAGED) { CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME); pvh = pa_to_pvh(oldpde & PG_PS_FRAME); @@ -5923,7 +5948,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, if (mpte != NULL) { KASSERT(mpte->valid == VM_PAGE_BITS_ALL, ("pmap_remove_pde: pte page not promoted")); - pmap_resident_count_dec(pmap, 1); + pmap_resident_count_adj(pmap, -1); KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pde: pte page ref count error")); mpte->ref_count = 0; @@ -5952,7 +5977,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, oldpte = pte_load_clear(ptq); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; - pmap_resident_count_dec(pmap, 1); + pmap_resident_count_adj(pmap, -1); if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) @@ -6122,7 +6147,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0); anyvalid = 1; *pdpe = 0; - pmap_resident_count_dec(pmap, NBPDP / PAGE_SIZE); + pmap_resident_count_adj(pmap, -NBPDP / PAGE_SIZE); mt = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, sva) & PG_FRAME); pmap_unwire_ptp(pmap, sva, mt, &free); continue; @@ -6257,7 +6282,7 @@ retry: PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); - pmap_resident_count_dec(pmap, 1); + pmap_resident_count_adj(pmap, -1); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" " a 2mpage in page %p's pv list", m)); @@ -6724,7 +6749,7 @@ restart: else if ((pten & PG_W) == 0 && (origpte & PG_W) != 0) pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE; if ((origpte & PG_V) == 0) - pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE); + pmap_resident_count_adj(pmap, pagesizes[psind] / PAGE_SIZE); return (KERN_SUCCESS); @@ -6959,7 +6984,7 @@ retry: */ if ((newpte & PG_W) != 0) pmap->pm_stats.wired_count++; - pmap_resident_count_inc(pmap, 1); + pmap_resident_count_adj(pmap, 1); } /* @@ -7206,7 +7231,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, */ if ((newpde & PG_W) != 0) pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; - pmap_resident_count_inc(pmap, NBPDR / 
PAGE_SIZE); + pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE); /* * Map the superpage. (This is not a promoted mapping; there will not @@ -7365,7 +7390,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, /* * Increment counters */ - pmap_resident_count_inc(pmap, 1); + pmap_resident_count_adj(pmap, 1); newpte = VM_PAGE_TO_PHYS(m) | PG_V | pmap_cache_bits(pmap, m->md.pat_mode, 0); @@ -7474,7 +7499,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, if ((*pde & PG_V) == 0) { pde_store(pde, pa | PG_PS | PG_M | PG_A | PG_U | PG_RW | PG_V); - pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE); + pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE); counter_u64_add(pmap_pde_mappings, 1); } else { /* Continue on if the PDE is already valid. */ @@ -7678,7 +7703,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, "pdpe %#lx sva %#lx eva %#lx va_next %#lx", *pdpe, addr, end_addr, va_next)); *pdpe = srcptepaddr & ~PG_W; - pmap_resident_count_inc(dst_pmap, NBPDP / PAGE_SIZE); + pmap_resident_count_adj(dst_pmap, NBPDP / PAGE_SIZE); continue; } @@ -7701,7 +7726,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, PMAP_ENTER_NORECLAIM, &lock))) { *pde = srcptepaddr & ~PG_W; - pmap_resident_count_inc(dst_pmap, NBPDR / + pmap_resident_count_adj(dst_pmap, NBPDR / PAGE_SIZE); counter_u64_add(pmap_pde_mappings, 1); } else @@ -7748,7 +7773,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, * (referenced) bits during the copy. */ *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A); - pmap_resident_count_inc(dst_pmap, 1); + pmap_resident_count_adj(dst_pmap, 1); } else { pmap_abort_ptp(dst_pmap, addr, dstmpte); goto out; @@ -8152,7 +8177,7 @@ pmap_remove_pages(pmap_t pmap) /* Mark free */ pc->pc_map[field] |= bitmask; if (superpage) { - pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); + pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE); pvh = pa_to_pvh(tpte & PG_PS_FRAME); TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; @@ -8166,14 +8191,14 @@ pmap_remove_pages(pmap_t pmap) if (mpte != NULL) { KASSERT(mpte->valid == VM_PAGE_BITS_ALL, ("pmap_remove_pages: pte page not promoted")); - pmap_resident_count_dec(pmap, 1); + pmap_resident_count_adj(pmap, -1); KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pages: pte page reference count error")); mpte->ref_count = 0; pmap_add_delayed_free_list(mpte, &free, FALSE); } } else { - pmap_resident_count_dec(pmap, 1); + pmap_resident_count_adj(pmap, -1); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((m->a.flags & PGA_WRITEABLE) != 0 && @@ -9095,13 +9120,13 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va) oldpdpe = *pdpe; KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V), ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V")); - if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT | - VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { + pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT, + VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT); + if (pdpg == NULL) { CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } - counter_u64_add(pt_page_count, 1); pdpgpa = VM_PAGE_TO_PHYS(pdpg); firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa); newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V; @@ -10113,16 +10138,8 @@ pmap_quick_remove_page(vm_offset_t addr) static vm_page_t pmap_large_map_getptp_unlocked(void) { - 
vm_page_t m; - - m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | - VM_ALLOC_ZERO); - if (m != NULL) { - if ((m->flags & PG_ZERO) == 0) - pmap_zero_page(m); - counter_u64_add(pt_page_count, 1); - } - return (m); + return (pmap_alloc_pt_page(kernel_pmap, 0, + VM_ALLOC_NORMAL | VM_ALLOC_ZERO)); } static vm_page_t