Date: Thu, 23 Apr 2026 18:16:28 +0000 From: John Baldwin <jhb@FreeBSD.org> To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org Subject: git: 120a5e3e195f - main - DMAP_TO_VM_PAGE: Wrapper macro to map direct map address to a page Message-ID: <69ea61fc.44f4e.6789b707@gitrepo.freebsd.org>
index | next in thread | raw e-mail
The branch main has been updated by jhb: URL: https://cgit.FreeBSD.org/src/commit/?id=120a5e3e195f1f780d89ce689e23654422285d62 commit 120a5e3e195f1f780d89ce689e23654422285d62 Author: John Baldwin <jhb@FreeBSD.org> AuthorDate: 2026-04-23 17:05:55 +0000 Commit: John Baldwin <jhb@FreeBSD.org> CommitDate: 2026-04-23 17:05:55 +0000 DMAP_TO_VM_PAGE: Wrapper macro to map direct map address to a page Effort: CHERI upstreaming Suggested by: kib Reviewed by: kib Sponsored by: AFRL, DARPA Pull Request: https://github.com/freebsd/freebsd-src/pull/2068 --- sys/amd64/amd64/pmap.c | 30 ++++++++++++++--------------- sys/arm64/arm64/pmap.c | 4 ++-- sys/compat/linuxkpi/common/src/linux_page.c | 2 +- sys/kern/uipc_ktls.c | 2 +- sys/powerpc/aim/mmu_radix.c | 6 +++--- sys/powerpc/booke/pmap_64.c | 10 +++++----- sys/powerpc/powerpc/uma_machdep.c | 2 +- sys/riscv/riscv/pmap.c | 4 ++-- sys/vm/vm_page.h | 2 ++ 9 files changed, 32 insertions(+), 30 deletions(-) diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 60a5b5a2da9a..6f8f767c40bd 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -4834,7 +4834,7 @@ pmap_release(pmap_t pmap) KASSERT(CPU_EMPTY(&pmap->pm_active), ("releasing active pmap %p", pmap)); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_pmltop)); + m = DMAP_TO_VM_PAGE(pmap->pm_pmltop); if (pmap_is_la57(pmap)) { for (i = NPML5EPG / 2; i < NPML5EPG; i++) @@ -4863,7 +4863,7 @@ pmap_release(pmap_t pmap) pmap_pt_page_count_pinit(pmap, -1); if (pmap->pm_pmltopu != NULL) { - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_pmltopu)); + m = DMAP_TO_VM_PAGE(pmap->pm_pmltopu); pmap_free_pt_page(NULL, m, false); pmap_pt_page_count_pinit(pmap, -1); } @@ -5348,7 +5348,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain) PV_STAT(counter_u64_add(pc_chunk_count, -1)); PV_STAT(counter_u64_add(pc_chunk_frees, 1)); /* Entire chunk is free; return it. 
*/ - m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m_pc = DMAP_TO_VM_PAGE(pc); dump_drop_page(m_pc->phys_addr); mtx_lock(&pvc->pvc_lock); TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); @@ -5449,7 +5449,7 @@ free_pv_chunk_dequeued(struct pv_chunk *pc) PV_STAT(counter_u64_add(pc_chunk_frees, 1)); counter_u64_add(pv_page_count, -1); /* entire chunk is free, return it */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m = DMAP_TO_VM_PAGE(pc); dump_drop_page(m->phys_addr); vm_page_unwire_noq(m); vm_page_free(m); @@ -10711,7 +10711,7 @@ retry: goto retry; mphys = VM_PAGE_TO_PHYS(m); *pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx; - PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde))->ref_count++; + DMAP_TO_VM_PAGE(pde)->ref_count++; } else { MPASS((*pde & X86_PG_PS) == 0); mphys = *pde & PG_FRAME; @@ -10829,7 +10829,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr, *pde = pa | pg_g | X86_PG_PS | X86_PG_RW | X86_PG_V | X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap, mattr, true); - PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde))->ref_count++; + DMAP_TO_VM_PAGE(pde)->ref_count++; inc = NBPDR; } else { pte = pmap_large_map_pte(va); @@ -10837,7 +10837,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr, *pte = pa | pg_g | X86_PG_RW | X86_PG_V | X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap, mattr, false); - PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte))->ref_count++; + DMAP_TO_VM_PAGE(pte)->ref_count++; inc = PAGE_SIZE; } } @@ -10905,7 +10905,7 @@ pmap_large_unmap(void *svaa, vm_size_t len) pd, len)); pde_store(pde, 0); inc = NBPDR; - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde)); + m = DMAP_TO_VM_PAGE(pde); m->ref_count--; if (m->ref_count == 0) { *pdpe = 0; @@ -10919,12 +10919,12 @@ pmap_large_unmap(void *svaa, vm_size_t len) (u_long)pte, *pte)); pte_clear(pte); inc = PAGE_SIZE; - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte)); + m = DMAP_TO_VM_PAGE(pte); m->ref_count--; if (m->ref_count == 0) { *pde = 0; SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde)); + m = 
DMAP_TO_VM_PAGE(pde); m->ref_count--; if (m->ref_count == 0) { *pdpe = 0; @@ -11227,7 +11227,7 @@ pmap_pti_wire_pte(void *pte) vm_page_t m; VM_OBJECT_ASSERT_WLOCKED(pti_obj); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte)); + m = DMAP_TO_VM_PAGE(pte); m->ref_count++; } @@ -11237,7 +11237,7 @@ pmap_pti_unwire_pde(void *pde, bool only_ref) vm_page_t m; VM_OBJECT_ASSERT_WLOCKED(pti_obj); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde)); + m = DMAP_TO_VM_PAGE(pde); MPASS(only_ref || m->ref_count > 1); pmap_pti_free_page(m); } @@ -11249,7 +11249,7 @@ pmap_pti_unwire_pte(void *pte, vm_offset_t va) pd_entry_t *pde; VM_OBJECT_ASSERT_WLOCKED(pti_obj); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte)); + m = DMAP_TO_VM_PAGE(pte); if (pmap_pti_free_page(m)) { pde = pmap_pti_pde(va); MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V); @@ -12343,8 +12343,8 @@ DB_SHOW_COMMAND(ptpages, pmap_ptpages) ptpages_show_pml4(pg, NPML4EPG, PG_V); } } else { - ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS( - pmap->pm_pmltop)), NUP4ML4E, PG_V); + ptpages_show_pml4(DMAP_TO_VM_PAGE(pmap->pm_pmltop), NUP4ML4E, + PG_V); } } #endif diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index 4e216ea01ab5..a0f8bbb4bed0 100644 --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -3459,7 +3459,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain) PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* Entire chunk is free; return it. 
*/ - m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m_pc = DMAP_TO_VM_PAGE(pc); dump_drop_page(m_pc->phys_addr); mtx_lock(&pvc->pvc_lock); TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); @@ -3561,7 +3561,7 @@ free_pv_chunk_dequeued(struct pv_chunk *pc) PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m = DMAP_TO_VM_PAGE(pc); dump_drop_page(m->phys_addr); vm_page_unwire_noq(m); vm_page_free(m); diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c index 39edb34545b7..b91115a5ff16 100644 --- a/sys/compat/linuxkpi/common/src/linux_page.c +++ b/sys/compat/linuxkpi/common/src/linux_page.c @@ -241,7 +241,7 @@ linux_free_kmem(vm_offset_t addr, unsigned int order) } else { vm_page_t page; - page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr)); + page = DMAP_TO_VM_PAGE(addr); linux_free_pages(page, order); } } diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c index 5e8bf3ba8a5d..41b18b29929b 100644 --- a/sys/kern/uipc_ktls.c +++ b/sys/kern/uipc_ktls.c @@ -461,7 +461,7 @@ ktls_buffer_release(void *arg __unused, void **store, int count) int i, j; for (i = 0; i < count; i++) { - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(store[i])); + m = DMAP_TO_VM_PAGE(store[i]); for (j = 0; j < atop(ktls_maxlen); j++) { (void)vm_page_unwire_noq(m + j); vm_page_free(m + j); diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c index 63159107d856..dd3cb5128fd2 100644 --- a/sys/powerpc/aim/mmu_radix.c +++ b/sys/powerpc/aim/mmu_radix.c @@ -1497,7 +1497,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* Entire chunk is free; return it. 
*/ - m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m_pc = DMAP_TO_VM_PAGE(pc); dump_drop_page(m_pc->phys_addr); mtx_lock(&pv_chunks_mutex); TAILQ_REMOVE(&pv_chunks, pc, pc_lru); @@ -1587,7 +1587,7 @@ free_pv_chunk(struct pv_chunk *pc) PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m = DMAP_TO_VM_PAGE(pc); dump_drop_page(m->phys_addr); vm_page_unwire_noq(m); vm_page_free(m); @@ -3649,7 +3649,7 @@ radix_pgd_release(void *arg __unused, void **store, int count) * XXX selectively remove dmap and KVA entries so we don't * need to bzero */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(store[i])); + m = DMAP_TO_VM_PAGE(store[i]); for (int j = page_count-1; j >= 0; j--) { vm_page_unwire_noq(&m[j]); SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss); diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c index 6e08103f315c..08449b9407ae 100644 --- a/sys/powerpc/booke/pmap_64.c +++ b/sys/powerpc/booke/pmap_64.c @@ -275,7 +275,7 @@ get_pgtbl_page(pmap_t pmap, void **ptr_tbl, uint32_t index, } return (page); } - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page)); + m = DMAP_TO_VM_PAGE(page); page = ptr_tbl[index]; vm_page_unwire_noq(m); vm_page_free_zero(m); @@ -339,19 +339,19 @@ ptbl_unhold(pmap_t pmap, vm_offset_t va) ptbl = pdir[pdir_idx]; /* decrement hold count */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(ptbl)); + m = DMAP_TO_VM_PAGE(ptbl); if (!unhold_free_page(pmap, m)) return (0); pdir[pdir_idx] = NULL; - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pdir)); + m = DMAP_TO_VM_PAGE(pdir); if (!unhold_free_page(pmap, m)) return (1); pdir_l1[pdir_l1_idx] = NULL; - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pdir_l1)); + m = DMAP_TO_VM_PAGE(pdir_l1); if (!unhold_free_page(pmap, m)) return (1); @@ -372,7 +372,7 @@ ptbl_hold(pmap_t pmap, pte_t *ptbl) KASSERT((pmap != kernel_pmap), ("ptbl_hold: holding kernel ptbl!")); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(ptbl)); + m = DMAP_TO_VM_PAGE(ptbl); 
m->ref_count++; } diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c index f5e3b8b3356d..637690b52695 100644 --- a/sys/powerpc/powerpc/uma_machdep.c +++ b/sys/powerpc/powerpc/uma_machdep.c @@ -80,7 +80,7 @@ uma_small_free(void *mem, vm_size_t size, u_int8_t flags) vm_page_t m; if (hw_direct_map) - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(mem)); + m = DMAP_TO_VM_PAGE(mem); else { m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem)); pmap_kremove((vm_offset_t)mem); diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c index a49265250850..0cac747334a9 100644 --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -1924,7 +1924,7 @@ pmap_release(pmap_t pmap) finish: npages = pmap->pm_stage == PM_STAGE2 ? 4 : 1; - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_top)); + m = DMAP_TO_VM_PAGE(pmap->pm_top); for (i = 0; i < npages; i++) { vm_page_unwire_noq(m); vm_page_free(m); @@ -2132,7 +2132,7 @@ free_pv_chunk(struct pv_chunk *pc) PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc)); + m = DMAP_TO_VM_PAGE(pc); dump_drop_page(m->phys_addr); vm_page_unwire_noq(m); vm_page_free(m); diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index a091310ffd17..d0c1027a5f40 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -461,6 +461,8 @@ extern long first_page; /* first physical page number */ */ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa); +#define DMAP_TO_VM_PAGE(va) PHYS_TO_VM_PAGE(DMAP_TO_PHYS(va)) + /* * vm_page allocation arguments for the functions vm_page_alloc(), * vm_page_alloc_contig(), vm_page_alloc_noobj(), vm_page_grab(), andhome | help
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69ea61fc.44f4e.6789b707>
