Date: Thu, 23 Apr 2026 18:16:20 +0000 From: John Baldwin <jhb@FreeBSD.org> To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org Subject: git: ac7d52740249 - main - pmap_map: Use void * for the return value instead of vm_offset_t Message-ID: <69ea61f4.450b4.67e3208@gitrepo.freebsd.org>
index | next in thread | raw e-mail
The branch main has been updated by jhb: URL: https://cgit.FreeBSD.org/src/commit/?id=ac7d52740249de51e805a7cd577b4374d6a6ae81 commit ac7d52740249de51e805a7cd577b4374d6a6ae81 Author: John Baldwin <jhb@FreeBSD.org> AuthorDate: 2026-04-23 17:05:54 +0000 Commit: John Baldwin <jhb@FreeBSD.org> CommitDate: 2026-04-23 17:05:54 +0000 pmap_map: Use void * for the return value instead of vm_offset_t Effort: CHERI upstreaming Reviewed by: kib Sponsored by: AFRL, DARPA Pull Request: https://github.com/freebsd/freebsd-src/pull/2068 --- sys/amd64/amd64/pmap.c | 4 ++-- sys/arm/arm/pmap-v6.c | 6 +++--- sys/arm64/arm64/pmap.c | 4 ++-- sys/dev/md/md.c | 2 +- sys/i386/i386/pmap.c | 4 ++-- sys/i386/i386/pmap_base.c | 2 +- sys/i386/include/pmap_base.h | 2 +- sys/powerpc/aim/mmu_oea.c | 6 +++--- sys/powerpc/aim/mmu_oea64.c | 11 +++++------ sys/powerpc/aim/mmu_radix.c | 9 +++++---- sys/powerpc/booke/pmap.c | 8 ++++---- sys/powerpc/include/mmuvar.h | 2 +- sys/powerpc/powerpc/pmap_dispatch.c | 2 +- sys/riscv/riscv/pmap.c | 4 ++-- sys/vm/pmap.h | 2 +- sys/vm/uma_core.c | 2 +- sys/vm/vm_page.c | 8 ++++---- sys/vm/vm_reserv.c | 2 +- 18 files changed, 40 insertions(+), 40 deletions(-) diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 56c6d153aa53..5592dc3bb683 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -3923,10 +3923,10 @@ pmap_kremove(vm_offset_t va) * update '*virt' with the first usable address after the mapped * region. 
*/ -vm_offset_t +void * pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { - return PHYS_TO_DMAP(start); + return ((void *)PHYS_TO_DMAP(start)); } /* diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index 6bb1ac02b2e3..2b0ebebefaec 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -1152,7 +1152,7 @@ pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) * * void pmap_kenter(vm_offset_t va, vm_size_t size, vm_paddr_t pa, int mode); * void pmap_kremove(vm_offset_t va); - * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, + * void *pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, * int prot); * * NOTE: This is not SMP coherent stage. And physical page allocation is not @@ -1402,7 +1402,7 @@ pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1) * NOTE: Read the comments above pmap_kenter_prot_attr() as * the function is used herein! */ -vm_offset_t +void * pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { vm_offset_t va, sva; @@ -1455,7 +1455,7 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) } tlb_flush_range(sva, va - sva); *virt = va; - return (sva); + return ((void *)sva); } /* diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index c2d19d8fd40e..bc024b4053c6 100644 --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -2559,10 +2559,10 @@ pmap_kremove_device(vm_offset_t sva, vm_size_t size) * update '*virt' with the first usable address after the mapped * region. 
*/ -vm_offset_t +void * pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { - return PHYS_TO_DMAP(start); + return ((void *)PHYS_TO_DMAP(start)); } /* diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index 2dcb56160fc6..5fec3181aaff 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -2131,7 +2131,7 @@ g_md_init(struct g_class *mp __unused) (long *) &paddr) != 0 || resource_int_value("md", i, "len", &len) != 0) break; - ptr = (char *)pmap_map(NULL, paddr, paddr + len, VM_PROT_READ); + ptr = pmap_map(NULL, paddr, paddr + len, VM_PROT_READ); if (ptr != NULL && len != 0) { sprintf(scratch, "preload%d 0x%016jx", i, (uintmax_t)paddr); diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index c88b27397b47..efaaf103ab29 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -1790,7 +1790,7 @@ __CONCAT(PMTYPE, kremove)(vm_offset_t va) * update '*virt' with the first usable address after the mapped * region. */ -static vm_offset_t +static void * __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { @@ -1832,7 +1832,7 @@ __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, } pmap_invalidate_range_int(kernel_pmap, sva, va); *virt = va; - return (sva); + return ((void *)sva); } /* diff --git a/sys/i386/i386/pmap_base.c b/sys/i386/i386/pmap_base.c index 0137a4fb26f1..7a45279775cd 100644 --- a/sys/i386/i386/pmap_base.c +++ b/sys/i386/i386/pmap_base.c @@ -836,7 +836,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot)); } -vm_offset_t +void * pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { diff --git a/sys/i386/include/pmap_base.h b/sys/i386/include/pmap_base.h index 5ae2f3c8b366..73b76e2225eb 100644 --- a/sys/i386/include/pmap_base.h +++ b/sys/i386/include/pmap_base.h @@ -98,7 +98,7 @@ struct pmap_methods { void (*pm_page_set_memattr)(vm_page_t, vm_memattr_t); vm_paddr_t 
(*pm_extract)(pmap_t, vm_offset_t); vm_page_t (*pm_extract_and_hold)(pmap_t, vm_offset_t, vm_prot_t); - vm_offset_t (*pm_map)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); + void *(*pm_map)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); void (*pm_qenter)(void *sva, vm_page_t *, int); void (*pm_qremove)(void *, int); void (*pm_release)(pmap_t); diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index d2105d22d07d..e051bac45aed 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -291,7 +291,7 @@ bool moea_is_modified(vm_page_t); bool moea_is_prefaultable(pmap_t, vm_offset_t); bool moea_is_referenced(vm_page_t); int moea_ts_referenced(vm_page_t); -vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +void *moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *); bool moea_page_exists_quick(pmap_t, vm_page_t); void moea_page_init(vm_page_t); @@ -1642,7 +1642,7 @@ moea_decode_kernel_ptr(vm_offset_t addr, int *is_user, * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. 
*/ -vm_offset_t +void * moea_map(vm_offset_t *virt, vm_paddr_t pa_start, vm_paddr_t pa_end, int prot) { @@ -1653,7 +1653,7 @@ moea_map(vm_offset_t *virt, vm_paddr_t pa_start, for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) moea_kenter(va, pa_start); *virt = va; - return (sva); + return ((void *)sva); } /* diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c index 27b214ccf1bf..ac0444ddade0 100644 --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -372,7 +372,7 @@ bool moea64_is_modified(vm_page_t); bool moea64_is_prefaultable(pmap_t, vm_offset_t); bool moea64_is_referenced(vm_page_t); int moea64_ts_referenced(vm_page_t); -vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +void *moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); bool moea64_page_exists_quick(pmap_t, vm_page_t); void moea64_page_init(vm_page_t); int moea64_page_wired_mappings(vm_page_t); @@ -2314,7 +2314,7 @@ moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user, * unchanged. Other architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped region. 
*/ -vm_offset_t +void * moea64_map(vm_offset_t *virt, vm_paddr_t pa_start, vm_paddr_t pa_end, int prot) { @@ -2331,7 +2331,7 @@ moea64_map(vm_offset_t *virt, vm_paddr_t pa_start, if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) break; if (va == pa_end) - return (PHYS_TO_DMAP(pa_start)); + return ((void *)PHYS_TO_DMAP(pa_start)); } sva = *virt; va = sva; @@ -2340,7 +2340,7 @@ moea64_map(vm_offset_t *virt, vm_paddr_t pa_start, moea64_kenter(va, pa_start); *virt = va; - return (sva); + return ((void *)sva); } /* @@ -3419,10 +3419,9 @@ moea64_page_array_startup(long pages) if (vm_ndomains == 1) { size = round_page(pages * sizeof(struct vm_page)); pa = vm_phys_early_alloc(0, size); - vm_page_base = moea64_map(&vm_page_base, + vm_page_array = moea64_map(&vm_page_base, pa, pa + size, VM_PROT_READ | VM_PROT_WRITE); vm_page_array_size = pages; - vm_page_array = (vm_page_t)vm_page_base; return; } diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c index 42b906de2ff4..b8be7f188cb6 100644 --- a/sys/powerpc/aim/mmu_radix.c +++ b/sys/powerpc/aim/mmu_radix.c @@ -483,7 +483,7 @@ static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset, static int mmu_radix_growkernel(vm_offset_t); static void mmu_radix_init(void); static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *); -static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +static void *mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); static void mmu_radix_pinit0(pmap_t); static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t); @@ -4030,14 +4030,14 @@ out: return (cleared + not_cleared); } -static vm_offset_t +static void * mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start, vm_paddr_t end, int prot __unused) { CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end, prot); - return (PHYS_TO_DMAP(start)); + return ((void *)PHYS_TO_DMAP(start)); } void @@ -6447,7 +6447,8 @@ mmu_radix_page_array_startup(long pages) pa = 
vm_phys_early_alloc(-1, end - start); - start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT); + start = (vm_offset_t)mmu_radix_map(&start, pa, end - start, + VM_MEMATTR_DEFAULT); #ifdef notyet /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */ for (va = start; va < end; va += L3_PAGE_SIZE) { diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c index 2ebe4d64fbaa..315e86aa64a9 100644 --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -307,7 +307,7 @@ static bool mmu_booke_is_modified(vm_page_t); static bool mmu_booke_is_prefaultable(pmap_t, vm_offset_t); static bool mmu_booke_is_referenced(vm_page_t); static int mmu_booke_ts_referenced(vm_page_t); -static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, +static void *mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); static int mmu_booke_mincore(pmap_t, vm_offset_t, vm_paddr_t *); @@ -1570,7 +1570,7 @@ mmu_booke_remove_all(vm_page_t m) /* * Map a range of physical addresses into kernel virtual address space. */ -static vm_offset_t +static void * mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start, vm_paddr_t pa_end, int prot) { @@ -1580,7 +1580,7 @@ mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start, #ifdef __powerpc64__ /* XXX: Handle memory not starting at 0x0. 
*/ if (pa_end < ctob(Maxmem)) - return (PHYS_TO_DMAP(pa_start)); + return ((void *)PHYS_TO_DMAP(pa_start)); #endif while (pa_start < pa_end) { @@ -1590,7 +1590,7 @@ mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start, } *virt = va; - return (sva); + return ((void *)sva); } /* diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h index 7b2063ba7325..5eba81b88a09 100644 --- a/sys/powerpc/include/mmuvar.h +++ b/sys/powerpc/include/mmuvar.h @@ -66,7 +66,7 @@ typedef bool (*pmap_is_modified_t)(vm_page_t); typedef bool (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t); typedef bool (*pmap_is_referenced_t)(vm_page_t); typedef int (*pmap_ts_referenced_t)(vm_page_t); -typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +typedef void *(*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); typedef bool (*pmap_page_exists_quick_t)(pmap_t, vm_page_t); diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index fccb7b6bf5a9..81c17c7abf20 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -169,7 +169,7 @@ DEFINE_PMAP_IFUNC(void, copy_pages, vm_offset_t b_offset, int xfersize)); DEFINE_PMAP_IFUNC(int, growkernel_nopanic, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, init, (void)); -DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int)); +DEFINE_PMAP_IFUNC(void *, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int)); DEFINE_PMAP_IFUNC(int, pinit, (pmap_t)); DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t)); DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *)); diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c index 218f70222a22..050dc941b364 100644 --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -1368,11 +1368,11 @@ pmap_kremove_device(vm_offset_t sva, vm_size_t size) * update '*virt' with the first usable address 
after the mapped * region. */ -vm_offset_t +void * pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { - return PHYS_TO_DMAP(start); + return ((void *)PHYS_TO_DMAP(start)); } /* diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index 14ba7b241349..e6dcd47d32f6 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -142,7 +142,7 @@ bool pmap_is_modified(vm_page_t m); bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t va); bool pmap_is_referenced(vm_page_t m); bool pmap_is_valid_memattr(pmap_t, vm_memattr_t); -vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +void *pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap); void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size); diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c index 635856291a85..e6e872232f31 100644 --- a/sys/vm/uma_core.c +++ b/sys/vm/uma_core.c @@ -1899,7 +1899,7 @@ startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, } /* Allocate KVA and indirectly advance bootmem. 
*/ - return ((void *)pmap_map(&bootmem, m->phys_addr, + return (pmap_map(&bootmem, m->phys_addr, m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE)); } diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index b39d665f9e0f..2def369d739f 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -580,7 +580,7 @@ vm_page_startup(vm_offset_t vaddr) #endif int biggestone, i, segind; #ifdef WITNESS - vm_offset_t mapped; + void *mapped; int witness_size; #endif #if defined(__i386__) && defined(VM_PHYSSEG_DENSE) @@ -610,8 +610,8 @@ vm_page_startup(vm_offset_t vaddr) new_end -= witness_size; mapped = pmap_map(&vaddr, new_end, new_end + witness_size, VM_PROT_READ | VM_PROT_WRITE); - bzero((void *)mapped, witness_size); - witness_startup((void *)mapped); + bzero(mapped, witness_size); + witness_startup(mapped); #endif #if MINIDUMP_PAGE_TRACKING @@ -636,7 +636,7 @@ vm_page_startup(vm_offset_t vaddr) } vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages)); new_end -= vm_page_dump_size; - vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, + vm_page_dump = pmap_map(&vaddr, new_end, new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); bzero((void *)vm_page_dump, vm_page_dump_size); #if MINIDUMP_STARTUP_PAGE_TRACKING diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c index bc4c47076975..7e5eb4e08d0c 100644 --- a/sys/vm/vm_reserv.c +++ b/sys/vm/vm_reserv.c @@ -1471,7 +1471,7 @@ vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end) * next available virtual address is returned by reference. */ new_end = end - round_page(size); - vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end, + vm_reserv_array = pmap_map(vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); bzero(vm_reserv_array, size);
home | help
Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69ea61f4.450b4.67e3208>
