Date: Thu, 23 Apr 2026 18:16:17 +0000 From: John Baldwin <jhb@FreeBSD.org> To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org Subject: git: 2d0634d2e74b - main - kva_alloc/free: Use void * instead of vm_offset_t Message-ID: <69ea61f1.46085.3dc9c391@gitrepo.freebsd.org>
index | next in thread | raw e-mail
The branch main has been updated by jhb: URL: https://cgit.FreeBSD.org/src/commit/?id=2d0634d2e74bb697573afaf888207a8ad1ba3242 commit 2d0634d2e74bb697573afaf888207a8ad1ba3242 Author: John Baldwin <jhb@FreeBSD.org> AuthorDate: 2026-04-23 17:05:54 +0000 Commit: John Baldwin <jhb@FreeBSD.org> CommitDate: 2026-04-23 17:05:54 +0000 kva_alloc/free: Use void * instead of vm_offset_t Effort: CHERI upstreaming Reviewed by: kib Sponsored by: AFRL, DARPA Pull Request: https://github.com/freebsd/freebsd-src/pull/2068 --- sys/amd64/amd64/pmap.c | 6 +-- sys/arm/arm/pmap-v6.c | 24 ++++++------ sys/arm/include/pcpu.h | 2 +- sys/arm64/arm64/pmap.c | 58 ++++++++++++++--------------- sys/arm64/spe/arm_spe_backend.c | 8 ++-- sys/arm64/spe/arm_spe_dev.h | 13 ++++--- sys/compat/linux/linux_vdso.c | 7 +--- sys/compat/linuxkpi/common/src/linux_page.c | 6 +-- sys/dev/drm2/ttm/ttm_bo_util.c | 5 +-- sys/dev/gve/gve_qpl.c | 10 ++--- sys/dev/hwt/hwt_vm.c | 6 +-- sys/dev/hwt/hwt_vm.h | 2 +- sys/dev/md/md.c | 4 +- sys/dev/pci/controller/pci_n1sdp.c | 4 +- sys/dev/spibus/spigen.c | 4 +- sys/dev/xdma/xdma.h | 2 +- sys/dev/xdma/xdma_sg.c | 12 +++--- sys/i386/i386/copyout.c | 46 ++++++++++++----------- sys/i386/i386/pmap.c | 42 ++++++++++----------- sys/i386/include/md_var.h | 2 +- sys/i386/include/pcpu.h | 6 +-- sys/i386/linux/linux_copyout.c | 20 +++++----- sys/i386/pci/pci_cfgreg.c | 2 +- sys/kern/kern_kcov.c | 4 +- sys/kern/kern_sharedpage.c | 2 +- sys/kern/subr_devmap.c | 25 +++++++------ sys/kern/subr_sfbuf.c | 2 +- sys/kern/vfs_bio.c | 2 +- sys/powerpc/aim/mmu_oea.c | 16 ++++---- sys/powerpc/aim/mmu_oea64.c | 30 +++++++-------- sys/powerpc/aim/mmu_radix.c | 23 ++++++------ sys/powerpc/booke/pmap.c | 2 +- sys/powerpc/booke/pmap_32.c | 13 ++++--- sys/powerpc/include/pcpu.h | 2 +- sys/powerpc/mpc85xx/platform_mpc85xx.c | 11 +++--- sys/vm/uma_core.c | 12 +++--- sys/vm/uma_int.h | 2 +- sys/vm/vm_extern.h | 6 +-- sys/vm/vm_glue.c | 2 +- sys/vm/vm_init.c | 2 +- sys/vm/vm_kern.c | 24 ++++++------ 
sys/vm/vm_pager.c | 2 +- sys/x86/iommu/amd_drv.c | 12 +++--- 43 files changed, 246 insertions(+), 239 deletions(-) diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 1f4be4cf1a50..56c6d153aa53 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -2415,7 +2415,7 @@ pmap_init_pv_table(void) pv_npg = howmany(pmap_last_pa, NBPDR); s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page); s = round_page(s); - pv_table = (struct pmap_large_md_page *)kva_alloc(s); + pv_table = kva_alloc(s); if (pv_table == NULL) panic("%s: kva_alloc failed\n", __func__); @@ -9448,7 +9448,7 @@ pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags) if (!i) return ((void *)(va + offset)); } - va = kva_alloc(size); + va = (vm_offset_t)kva_alloc(size); if (va == 0) panic("%s: Couldn't allocate KVA", __func__); } @@ -9522,7 +9522,7 @@ pmap_unmapdev(void *p, vm_size_t size) } if (pmap_initialized) { pmap_qremove((void *)va, atop(size)); - kva_free(va, size); + kva_free((void *)va, size); } } diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index 5fe3d701e7ed..6bb1ac02b2e3 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -1213,7 +1213,7 @@ pmap_bootstrap(vm_offset_t firstaddr) mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1); SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1); - SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1); + SYSMAP(caddr_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1); /* * Crashdump maps. 
@@ -1249,7 +1249,7 @@ static void pmap_init_reserved_pages(void *dummy __unused) { struct pcpu *pc; - vm_offset_t pages; + char *pages; int i; CPU_FOREACH(i) { @@ -1262,13 +1262,13 @@ pmap_init_reserved_pages(void *dummy __unused) continue; mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); pages = kva_alloc(PAGE_SIZE * 3); - if (pages == 0) + if (pages == NULL) panic("%s: unable to allocate KVA", __func__); - pc->pc_cmap1_pte2p = pt2map_entry(pages); - pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE); - pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2)); - pc->pc_cmap1_addr = (caddr_t)pages; - pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE); + pc->pc_cmap1_pte2p = pt2map_entry((vm_offset_t)pages); + pc->pc_cmap2_pte2p = pt2map_entry((vm_offset_t)pages + PAGE_SIZE); + pc->pc_qmap_pte2p = pt2map_entry((vm_offset_t)pages + (PAGE_SIZE * 2)); + pc->pc_cmap1_addr = pages; + pc->pc_cmap2_addr = pages + PAGE_SIZE; pc->pc_qmap_addr = pages + (PAGE_SIZE * 2); } } @@ -1803,7 +1803,7 @@ pmap_init(void) TAILQ_INIT(&pv_table[i].pv_list); pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); - pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); + pv_chunkbase = kva_alloc(PAGE_SIZE * pv_maxchunks); if (pv_chunkbase == NULL) panic("%s: not enough kvm for pv chunks", __func__); pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks); @@ -6021,7 +6021,7 @@ pmap_quick_enter_page(vm_page_t m) pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, vm_page_pte2_attr(m))); - return ((void *)pc->pc_qmap_addr); + return (pc->pc_qmap_addr); } void @@ -6033,12 +6033,12 @@ pmap_quick_remove_page(void *addr) pc = get_pcpu(); pte2p = pc->pc_qmap_pte2p; - KASSERT(addr == (void *)pc->pc_qmap_addr, + KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); pte2_clear(pte2p); - tlb_flush(pc->pc_qmap_addr); + tlb_flush((vm_offset_t)pc->pc_qmap_addr); critical_exit(); } diff 
--git a/sys/arm/include/pcpu.h b/sys/arm/include/pcpu.h index 764ebdd987f0..22edff8848ea 100644 --- a/sys/arm/include/pcpu.h +++ b/sys/arm/include/pcpu.h @@ -55,7 +55,7 @@ struct vmspace; void *pc_cmap2_pte2p; \ caddr_t pc_cmap1_addr; \ caddr_t pc_cmap2_addr; \ - vm_offset_t pc_qmap_addr; \ + caddr_t pc_qmap_addr; \ void *pc_qmap_pte2p; \ unsigned int pc_dbreg[32]; \ int pc_dbreg_cmd; \ diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index f715fe4f3222..c2d19d8fd40e 100644 --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -1627,7 +1627,7 @@ pmap_init_pv_table(void) pmap_l2_pindex(seg->start); s += round_page(pages * sizeof(*pvd)); } - pv_table = (struct pmap_large_md_page *)kva_alloc(s); + pv_table = kva_alloc(s); if (pv_table == NULL) panic("%s: kva_alloc failed\n", __func__); @@ -8086,7 +8086,7 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size) offset = pa & PAGE_MASK; size = round_page(offset + size); - va = kva_alloc(size); + va = (vm_offset_t)kva_alloc(size); if (va == 0) panic("%s: Couldn't allocate KVA", __func__); @@ -8174,7 +8174,7 @@ pmap_unmapbios(void *p, vm_size_t size) /* Unmap and invalidate the pages */ pmap_kremove_device(va, size); - kva_free(va, size); + kva_free((void *)va, size); } } @@ -8475,7 +8475,7 @@ static pt_entry_t * pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) { pt_entry_t *l2, newl2, oldl1; - vm_offset_t tmpl1; + char *tmpl1; vm_paddr_t l2phys, phys; vm_page_t ml2; int i; @@ -8492,10 +8492,10 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) KASSERT((oldl1 & ATTR_SW_NO_PROMOTE) == 0, ("pmap_demote_l1: Demoting entry with no-demote flag set")); - tmpl1 = 0; + tmpl1 = NULL; if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) { tmpl1 = kva_alloc(PAGE_SIZE); - if (tmpl1 == 0) + if (tmpl1 == NULL) return (NULL); } @@ -8525,8 +8525,8 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) L2_BLOCK), ("Invalid l2 page (%lx != %lx)", l2[0], ATTR_CONTIGUOUS | (oldl1 & 
~ATTR_DESCR_MASK) | L2_BLOCK)); - if (tmpl1 != 0) { - pmap_kenter(tmpl1, PAGE_SIZE, + if (tmpl1 != NULL) { + pmap_kenter((vm_offset_t)tmpl1, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK)); @@ -8536,8 +8536,8 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) counter_u64_add(pmap_l1_demotions, 1); fail: - if (tmpl1 != 0) { - pmap_kremove(tmpl1); + if (tmpl1 != NULL) { + pmap_kremove((vm_offset_t)tmpl1); kva_free(tmpl1, PAGE_SIZE); } @@ -8605,7 +8605,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, struct rwlock **lockp) { pt_entry_t *l3, newl3, oldl2; - vm_offset_t tmpl2; + char *tmpl2; vm_paddr_t l3phys; vm_page_t ml3; @@ -8622,10 +8622,10 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, ("pmap_demote_l2: Demoting entry with no-demote flag set")); va &= ~L2_OFFSET; - tmpl2 = 0; + tmpl2 = NULL; if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) { tmpl2 = kva_alloc(PAGE_SIZE); - if (tmpl2 == 0) + if (tmpl2 == NULL) return (NULL); } @@ -8720,8 +8720,8 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, /* * Map the temporary page so we don't lose access to the l2 table. 
*/ - if (tmpl2 != 0) { - pmap_kenter(tmpl2, PAGE_SIZE, + if (tmpl2 != NULL) { + pmap_kenter((vm_offset_t)tmpl2, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK)); @@ -8755,8 +8755,8 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, " in pmap %p %lx", va, pmap, l3[0]); fail: - if (tmpl2 != 0) { - pmap_kremove(tmpl2); + if (tmpl2 != NULL) { + pmap_kremove((vm_offset_t)tmpl2); kva_free(tmpl2, PAGE_SIZE); } @@ -8784,7 +8784,7 @@ static bool pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va) { pd_entry_t *l2c_end, *l2c_start, l2e, mask, nbits, *tl2p; - vm_offset_t tmpl3; + char *tmpl3; register_t intr; PMAP_LOCK_ASSERT(pmap, MA_OWNED); @@ -8792,13 +8792,13 @@ pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va) l2c_start = (pd_entry_t *)((uintptr_t)l2p & ~((L2C_ENTRIES * sizeof(pd_entry_t)) - 1)); l2c_end = l2c_start + L2C_ENTRIES; - tmpl3 = 0; + tmpl3 = NULL; if ((va & ~L2C_OFFSET) < (vm_offset_t)l2c_end && (vm_offset_t)l2c_start < (va & ~L2C_OFFSET) + L2C_SIZE) { tmpl3 = kva_alloc(PAGE_SIZE); - if (tmpl3 == 0) + if (tmpl3 == NULL) return (false); - pmap_kenter(tmpl3, PAGE_SIZE, + pmap_kenter((vm_offset_t)tmpl3, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l2c_start) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l2c_start = (pd_entry_t *)(tmpl3 + @@ -8857,8 +8857,8 @@ pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va) dsb(ishst); intr_restore(intr); - if (tmpl3 != 0) { - pmap_kremove(tmpl3); + if (tmpl3 != NULL) { + pmap_kremove((vm_offset_t)tmpl3); kva_free(tmpl3, PAGE_SIZE); } counter_u64_add(pmap_l2c_demotions, 1); @@ -8874,20 +8874,20 @@ static bool pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va) { pt_entry_t *l3c_end, *l3c_start, l3e, mask, nbits, *tl3p; - vm_offset_t tmpl3; + char *tmpl3; register_t intr; PMAP_LOCK_ASSERT(pmap, MA_OWNED); l3c_start = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)); 
l3c_end = l3c_start + L3C_ENTRIES; - tmpl3 = 0; + tmpl3 = NULL; if ((va & ~L3C_OFFSET) < (vm_offset_t)l3c_end && (vm_offset_t)l3c_start < (va & ~L3C_OFFSET) + L3C_SIZE) { tmpl3 = kva_alloc(PAGE_SIZE); - if (tmpl3 == 0) + if (tmpl3 == NULL) return (false); - pmap_kenter(tmpl3, PAGE_SIZE, + pmap_kenter((vm_offset_t)tmpl3, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l3c_start) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l3c_start = (pt_entry_t *)(tmpl3 + @@ -8946,8 +8946,8 @@ pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va) dsb(ishst); intr_restore(intr); - if (tmpl3 != 0) { - pmap_kremove(tmpl3); + if (tmpl3 != NULL) { + pmap_kremove((vm_offset_t)tmpl3); kva_free(tmpl3, PAGE_SIZE); } counter_u64_add(pmap_l3c_demotions, 1); diff --git a/sys/arm64/spe/arm_spe_backend.c b/sys/arm64/spe/arm_spe_backend.c index c8d7de8f0c8c..f9eff13a939a 100644 --- a/sys/arm64/spe/arm_spe_backend.c +++ b/sys/arm64/spe/arm_spe_backend.c @@ -229,15 +229,15 @@ spe_backend_deinit(struct hwt_context *ctx) CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) { info = &spe_info_cpu[cpu_id]; printf("CPU %u:\n", cpu_id); - hex_dump((void *)info->kvaddr, 128); - hex_dump((void *)(info->kvaddr + (info->buf_size/2)), 128); + hex_dump(info->kvaddr, 128); + hex_dump((char *)info->kvaddr + (info->buf_size/2), 128); } } else { TAILQ_FOREACH(thr, &ctx->threads, next) { info = (struct arm_spe_info *)thr->private; printf("TID %u:\n", thr->thread_id); - hex_dump((void *)info->kvaddr, 128); - hex_dump((void *)(info->kvaddr + (info->buf_size/2)), 128); + hex_dump(info->kvaddr, 128); + hex_dump((char *)info->kvaddr + (info->buf_size/2), 128); } } #endif diff --git a/sys/arm64/spe/arm_spe_dev.h b/sys/arm64/spe/arm_spe_dev.h index ed1727b5b090..186274d2aa36 100644 --- a/sys/arm64/spe/arm_spe_dev.h +++ b/sys/arm64/spe/arm_spe_dev.h @@ -111,7 +111,7 @@ struct arm_spe_info { /* buffer is split in half as a ping-pong buffer */ vm_object_t bufobj; - vm_offset_t kvaddr; + char *kvaddr; size_t buf_size; uint8_t buf_idx : 1; /* 0 = 
first half of buf, 1 = 2nd half */ struct arm_spe_buf_info buf_info[2]; @@ -141,24 +141,25 @@ struct arm_spe_queue { static inline vm_offset_t buf_start_addr(u_int buf_idx, struct arm_spe_info *info) { - vm_offset_t addr; + char *addr; if (buf_idx == 0) addr = info->kvaddr; if (buf_idx == 1) - addr = info->kvaddr + (info->buf_size/2); + addr = info->kvaddr + (info->buf_size/2); - return (addr); + return ((vm_offset_t)addr); } static inline vm_offset_t buf_end_addr(u_int buf_idx, struct arm_spe_info *info) { - vm_offset_t addr; + char *addr; + if (buf_idx == 0) addr = info->kvaddr + (info->buf_size/2); if (buf_idx == 1) addr = info->kvaddr + info->buf_size; - return (addr); + return ((vm_offset_t)addr); } #endif /* _ARM64_ARM_SPE_DEV_H_ */ diff --git a/sys/compat/linux/linux_vdso.c b/sys/compat/linux/linux_vdso.c index 6e66aff69378..4e7b31ecdab4 100644 --- a/sys/compat/linux/linux_vdso.c +++ b/sys/compat/linux/linux_vdso.c @@ -69,7 +69,7 @@ __elfN(linux_shared_page_init)(char **mapping, vm_size_t size) pages = size / PAGE_SIZE; - addr = (char *)kva_alloc(size); + addr = kva_alloc(size); obj = vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0, NULL); VM_OBJECT_WLOCK(obj); @@ -89,11 +89,8 @@ void __elfN(linux_shared_page_fini)(vm_object_t obj, void *mapping, vm_size_t size) { - vm_offset_t va; - - va = (vm_offset_t)mapping; pmap_qremove(mapping, size / PAGE_SIZE); - kva_free(va, size); + kva_free(mapping, size); vm_object_deallocate(obj); } diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c index 41f5fd557330..10ede3287bb4 100644 --- a/sys/compat/linuxkpi/common/src/linux_page.c +++ b/sys/compat/linuxkpi/common/src/linux_page.c @@ -428,7 +428,7 @@ vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) size_t size; size = count * PAGE_SIZE; - off = (void *)kva_alloc(size); + off = kva_alloc(size); if (off == NULL) return (NULL); vmmap_add(off, size); @@ -451,7 +451,7 @@ 
linuxkpi_vmap_pfn(unsigned long *pfns, unsigned int count, int prot) unsigned int i, c, chunk; size = ptoa(count); - off = (void *)kva_alloc(size); + off = kva_alloc(size); if (off == NULL) return (NULL); vmmap_add(off, size); @@ -502,7 +502,7 @@ vunmap(void *addr) if (vmmap == NULL) return; pmap_qremove(addr, vmmap->vm_size / PAGE_SIZE); - kva_free((vm_offset_t)addr, vmmap->vm_size); + kva_free(addr, vmmap->vm_size); kfree(vmmap); } diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c index 1734a8103cde..4d4de90b6525 100644 --- a/sys/dev/drm2/ttm/ttm_bo_util.c +++ b/sys/dev/drm2/ttm/ttm_bo_util.c @@ -510,7 +510,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, VM_MEMATTR_DEFAULT : ttm_io_prot(mem->placement); map->bo_kmap_type = ttm_bo_map_vmap; map->num_pages = num_pages; - map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE); + map->virtual = kva_alloc(num_pages * PAGE_SIZE); if (map->virtual != NULL) { for (i = 0; i < num_pages; i++) { /* XXXKIB hack */ @@ -572,8 +572,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) break; case ttm_bo_map_vmap: pmap_qremove(map->virtual, map->num_pages); - kva_free((vm_offset_t)map->virtual, - map->num_pages * PAGE_SIZE); + kva_free(map->virtual, map->num_pages * PAGE_SIZE); break; case ttm_bo_map_kmap: sf_buf_free(map->sf); diff --git a/sys/dev/gve/gve_qpl.c b/sys/dev/gve/gve_qpl.c index f04e82497fa4..1f153d08c126 100644 --- a/sys/dev/gve/gve_qpl.c +++ b/sys/dev/gve/gve_qpl.c @@ -47,7 +47,7 @@ gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl) if (qpl->kva) { pmap_qremove(qpl->kva, qpl->num_pages); - kva_free((vm_offset_t)qpl->kva, PAGE_SIZE * qpl->num_pages); + kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages); } for (i = 0; i < qpl->num_pages; i++) { @@ -60,7 +60,7 @@ gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl) if (vm_page_unwire_noq(qpl->pages[i])) { if (!qpl->kva) { pmap_qremove(qpl->dmas[i].cpu_addr, 1); - 
kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE); + kva_free(qpl->dmas[i].cpu_addr, PAGE_SIZE); } vm_page_free(qpl->pages[i]); } @@ -106,7 +106,7 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva) qpl->kva = NULL; if (single_kva) { - qpl->kva = (char *)kva_alloc(PAGE_SIZE * npages); + qpl->kva = kva_alloc(PAGE_SIZE * npages); if (!qpl->kva) { device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id); err = ENOMEM; @@ -120,7 +120,7 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva) VM_ALLOC_ZERO); if (!single_kva) { - qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE); + qpl->dmas[i].cpu_addr = kva_alloc(PAGE_SIZE); if (!qpl->dmas[i].cpu_addr) { device_printf(priv->dev, "Failed to create kva for page %d in QPL %d", i, id); err = ENOMEM; @@ -253,7 +253,7 @@ gve_mextadd_free(struct mbuf *mbuf) */ if (__predict_false(vm_page_unwire_noq(page))) { pmap_qremove(va, 1); - kva_free((vm_offset_t)va, PAGE_SIZE); + kva_free(va, PAGE_SIZE); vm_page_free(page); } } diff --git a/sys/dev/hwt/hwt_vm.c b/sys/dev/hwt/hwt_vm.c index a3e906d71099..18bbdbe37a99 100644 --- a/sys/dev/hwt/hwt_vm.c +++ b/sys/dev/hwt/hwt_vm.c @@ -127,7 +127,7 @@ hwt_vm_alloc_pages(struct hwt_vm *vm, int kva_req) if (kva_req) { vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE); - if (!vm->kvaddr) + if (vm->kvaddr == NULL) return (ENOMEM); } @@ -441,8 +441,8 @@ hwt_vm_destroy_buffers(struct hwt_vm *vm) vm_page_t m; int i; - if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) { - pmap_qremove((void *)vm->kvaddr, vm->npages); + if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != NULL) { + pmap_qremove(vm->kvaddr, vm->npages); kva_free(vm->kvaddr, vm->npages * PAGE_SIZE); } VM_OBJECT_WLOCK(vm->obj); diff --git a/sys/dev/hwt/hwt_vm.h b/sys/dev/hwt/hwt_vm.h index 5002bd43e093..6c9bdf48ae6d 100644 --- a/sys/dev/hwt/hwt_vm.h +++ b/sys/dev/hwt/hwt_vm.h @@ -33,7 +33,7 @@ struct hwt_vm { vm_page_t *pages; int npages; vm_object_t obj; 
- vm_offset_t kvaddr; + void *kvaddr; struct cdev *cdev; struct hwt_context *ctx; diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index e2c0451b5843..2dcb56160fc6 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -1512,7 +1512,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) goto bad; } - sc->s_vnode.kva = (char *)kva_alloc(maxphys + PAGE_SIZE); + sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE); return (0); bad: VOP_UNLOCK(nd.ni_vp); @@ -1567,7 +1567,7 @@ mddestroy(struct md_s *sc, struct thread *td) sc->cred, td); } if (sc->s_vnode.kva != NULL) - kva_free((vm_offset_t)sc->s_vnode.kva, maxphys + PAGE_SIZE); + kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE); break; case MD_SWAP: if (sc->s_swap.object != NULL) diff --git a/sys/dev/pci/controller/pci_n1sdp.c b/sys/dev/pci/controller/pci_n1sdp.c index c1f8624e45aa..60664eec569e 100644 --- a/sys/dev/pci/controller/pci_n1sdp.c +++ b/sys/dev/pci/controller/pci_n1sdp.c @@ -100,7 +100,7 @@ n1sdp_init(struct generic_pcie_n1sdp_softc *sc) MPASS(m[i] != NULL); } - vaddr = (void *)kva_alloc((vm_size_t)BDF_TABLE_SIZE); + vaddr = kva_alloc((vm_size_t)BDF_TABLE_SIZE); if (vaddr == NULL) { printf("%s: Can't allocate KVA memory.", __func__); error = ENXIO; @@ -130,7 +130,7 @@ n1sdp_init(struct generic_pcie_n1sdp_softc *sc) out_pmap: pmap_qremove(vaddr, nitems(m)); - kva_free((vm_offset_t)vaddr, (vm_size_t)BDF_TABLE_SIZE); + kva_free(vaddr, (vm_size_t)BDF_TABLE_SIZE); out: vm_phys_fictitious_unreg_range(paddr, paddr + BDF_TABLE_SIZE); diff --git a/sys/dev/spibus/spigen.c b/sys/dev/spibus/spigen.c index 400ae1e139ad..8f7dbb504537 100644 --- a/sys/dev/spibus/spigen.c +++ b/sys/dev/spibus/spigen.c @@ -285,7 +285,7 @@ spigen_mmap_cleanup(void *arg) if (mmap->kvaddr != NULL) { pmap_qremove(mmap->kvaddr, mmap->bufsize / PAGE_SIZE); - kva_free((vm_offset_t)mmap->kvaddr, mmap->bufsize); + kva_free(mmap->kvaddr, mmap->bufsize); } if (mmap->bufobj != NULL) vm_object_deallocate(mmap->bufobj); @@ -312,7 +312,7 @@ 
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, return (EBUSY); mmap = malloc(sizeof(*mmap), M_DEVBUF, M_ZERO | M_WAITOK); - if ((mmap->kvaddr = (void *)kva_alloc(size)) == 0) { + if ((mmap->kvaddr = kva_alloc(size)) == 0) { spigen_mmap_cleanup(mmap); return (ENOMEM); } diff --git a/sys/dev/xdma/xdma.h b/sys/dev/xdma/xdma.h index 40f6ea8f6f98..2e1c0d64ee46 100644 --- a/sys/dev/xdma/xdma.h +++ b/sys/dev/xdma/xdma.h @@ -93,7 +93,7 @@ struct xchan_buf { bus_dmamap_t map; uint32_t nsegs; uint32_t nsegs_left; - vm_offset_t vaddr; + void *vaddr; vm_offset_t paddr; vm_size_t size; }; diff --git a/sys/dev/xdma/xdma_sg.c b/sys/dev/xdma/xdma_sg.c index ccf721e3c16c..c102e9b9f456 100644 --- a/sys/dev/xdma/xdma_sg.c +++ b/sys/dev/xdma/xdma_sg.c @@ -75,9 +75,9 @@ xchan_bufs_free_reserved(xdma_channel_t *xchan) xr = &xchan->xr_mem[i]; size = xr->buf.size; if (xr->buf.vaddr) { - pmap_kremove_device(xr->buf.vaddr, size); + pmap_kremove_device((vm_offset_t)xr->buf.vaddr, size); kva_free(xr->buf.vaddr, size); - xr->buf.vaddr = 0; + xr->buf.vaddr = NULL; } if (xr->buf.paddr) { vmem_free(xchan->vmem, xr->buf.paddr, size); @@ -115,13 +115,13 @@ xchan_bufs_alloc_reserved(xdma_channel_t *xchan) xr->buf.size = size; xr->buf.paddr = addr; xr->buf.vaddr = kva_alloc(size); - if (xr->buf.vaddr == 0) { + if (xr->buf.vaddr == NULL) { device_printf(xdma->dev, "%s: Can't allocate KVA\n", __func__); xchan_bufs_free_reserved(xchan); return (ENOMEM); } - pmap_kenter_device(xr->buf.vaddr, size, addr); + pmap_kenter_device((vm_offset_t)xr->buf.vaddr, size, addr); } return (0); @@ -346,7 +346,7 @@ xchan_seg_done(xdma_channel_t *xchan, if (xr->req_type == XR_TYPE_MBUF && xr->direction == XDMA_DEV_TO_MEM) m_copyback(xr->m, 0, st->transferred, - (void *)xr->buf.vaddr); + xr->buf.vaddr); } else if (xchan->caps & XCHAN_CAP_IOMMU) { if (xr->direction == XDMA_MEM_TO_DEV) addr = xr->src_addr; @@ -500,7 +500,7 @@ _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr, if (xchan->caps & 
XCHAN_CAP_BOUNCE) { if (xr->direction == XDMA_MEM_TO_DEV) m_copydata(m, 0, m->m_pkthdr.len, - (void *)xr->buf.vaddr); + xr->buf.vaddr); seg[0].ds_addr = (bus_addr_t)xr->buf.paddr; } else if (xchan->caps & XCHAN_CAP_IOMMU) { addr = mtod(m, bus_addr_t); diff --git a/sys/i386/i386/copyout.c b/sys/i386/i386/copyout.c index 6f73c968b590..1697874750c4 100644 --- a/sys/i386/i386/copyout.c +++ b/sys/i386/i386/copyout.c @@ -89,11 +89,11 @@ copyout_init_tramp(void) int cp_slow0(vm_offset_t uva, size_t len, bool write, - void (*f)(vm_offset_t, void *), void *arg) + void (*f)(void *, void *), void *arg) { struct pcpu *pc; vm_page_t m[2]; - vm_offset_t kaddr; + char *kaddr; int error, i, plen; bool sleepable; @@ -117,7 +117,7 @@ cp_slow0(vm_offset_t uva, size_t len, bool write, sx_xlock(&pc->pc_copyout_slock); kaddr = pc->pc_copyout_saddr; } - pmap_cp_slow0_map(kaddr, plen, m); + pmap_cp_slow0_map((vm_offset_t)kaddr, plen, m); kaddr += uva - trunc_page(uva); f(kaddr, arg); sched_unpin(); @@ -130,23 +130,25 @@ cp_slow0(vm_offset_t uva, size_t len, bool write, } struct copyinstr_arg0 { - vm_offset_t kc; + char *kc; size_t len; size_t alen; bool end; }; static void -copyinstr_slow0(vm_offset_t kva, void *arg) +copyinstr_slow0(void *kva, void *arg) { struct copyinstr_arg0 *ca; + char *src; char c; ca = arg; + src = kva; MPASS(ca->alen == 0 && ca->len > 0 && !ca->end); while (ca->alen < ca->len && !ca->end) { - c = *(char *)(kva + ca->alen); - *(char *)ca->kc = c; + c = *(src + ca->alen); + *ca->kc = c; ca->alen++; ca->kc++; if (c == '\0') @@ -164,7 +166,7 @@ copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied) error = 0; ca.end = false; - for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr; + for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = kaddr; plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) { ca.len = round_page(uc) - uc; if (ca.len == 0) @@ -185,17 +187,17 @@ copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t 
*lencopied) } struct copyin_arg0 { - vm_offset_t kc; + char *kc; size_t len; }; static void -copyin_slow0(vm_offset_t kva, void *arg) +copyin_slow0(void *kva, void *arg) { struct copyin_arg0 *ca; ca = arg; - bcopy((void *)kva, (void *)ca->kc, ca->len); + bcopy(kva, ca->kc, ca->len); } int @@ -211,7 +213,7 @@ copyin(const void *udaddr, void *kaddr, size_t len) if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ && copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0)) return (0); - for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr; + for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = kaddr; plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) { ca.len = round_page(uc) - uc; if (ca.len == 0) @@ -225,12 +227,12 @@ copyin(const void *udaddr, void *kaddr, size_t len) } static void -copyout_slow0(vm_offset_t kva, void *arg) +copyout_slow0(void *kva, void *arg) { struct copyin_arg0 *ca; ca = arg; - bcopy((void *)ca->kc, (void *)kva, ca->len); + bcopy(ca->kc, kva, ca->len); } int @@ -246,7 +248,7 @@ copyout(const void *kaddr, void *udaddr, size_t len) if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ && copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0)) return (0); - for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr; + for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = __DECONST(void *, kaddr); plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) { ca.len = round_page(uc) - uc; if (ca.len == 0) @@ -265,7 +267,7 @@ copyout(const void *kaddr, void *udaddr, size_t len) */ static void -fubyte_slow0(vm_offset_t kva, void *arg) +fubyte_slow0(void *kva, void *arg) { *(int *)arg = *(u_char *)kva; @@ -291,7 +293,7 @@ fubyte(volatile const void *base) } static void -fuword16_slow0(vm_offset_t kva, void *arg) +fuword16_slow0(void *kva, void *arg) { *(int *)arg = *(uint16_t *)kva; @@ -317,7 +319,7 @@ fuword16(volatile const void *base) } static void -fueword_slow0(vm_offset_t kva, void *arg) +fueword_slow0(void 
*kva, void *arg) { *(uint32_t *)arg = *(uint32_t *)kva; @@ -354,7 +356,7 @@ fueword32(volatile const void *base, int32_t *val) */ static void -subyte_slow0(vm_offset_t kva, void *arg) +subyte_slow0(void *kva, void *arg) { *(u_char *)kva = *(int *)arg; @@ -374,7 +376,7 @@ subyte(volatile void *base, int byte) } static void -suword16_slow0(vm_offset_t kva, void *arg) +suword16_slow0(void *kva, void *arg) { *(int *)kva = *(uint16_t *)arg; @@ -395,7 +397,7 @@ suword16(volatile void *base, int word) } static void -suword_slow0(vm_offset_t kva, void *arg) +suword_slow0(void *kva, void *arg) { *(int *)kva = *(uint32_t *)arg; @@ -428,7 +430,7 @@ struct casueword_arg0 { }; static void -casueword_slow0(vm_offset_t kva, void *arg) +casueword_slow0(void *kva, void *arg) { struct casueword_arg0 *ca; diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index 9d758bc2b63d..c88b27397b47 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -665,7 +665,7 @@ __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr) mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) - SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) + SYSMAP(caddr_t, pte, pc->pc_qmap_addr, 1) SYSMAP(caddr_t, CMAP3, CADDR3, 1); @@ -723,7 +723,7 @@ static void pmap_init_reserved_pages(void *dummy __unused) { struct pcpu *pc; - vm_offset_t pages; + char *pages; int i; #ifdef PMAP_PAE_COMP @@ -738,13 +738,13 @@ pmap_init_reserved_pages(void *dummy __unused) mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | MTX_NEW); pc->pc_copyout_maddr = kva_alloc(ptoa(2)); - if (pc->pc_copyout_maddr == 0) + if (pc->pc_copyout_maddr == NULL) panic("unable to allocate non-sleepable copyout KVA"); sx_init(&pc->pc_copyout_slock, "cpslk"); pc->pc_copyout_saddr = kva_alloc(ptoa(2)); - if (pc->pc_copyout_saddr == 0) + if (pc->pc_copyout_saddr == NULL) panic("unable to allocate sleepable copyout KVA"); - 
pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); + pc->pc_pmap_eh_va = (vm_offset_t)kva_alloc(ptoa(1)); if (pc->pc_pmap_eh_va == 0) panic("unable to allocate pmap_extract_and_hold KVA"); pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); @@ -758,12 +758,12 @@ pmap_init_reserved_pages(void *dummy __unused) mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); pages = kva_alloc(PAGE_SIZE * 3); - if (pages == 0) + if (pages == NULL) panic("unable to allocate CMAP KVA"); - pc->pc_cmap_pte1 = vtopte(pages); - pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); - pc->pc_cmap_addr1 = (caddr_t)pages; - pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); + pc->pc_cmap_pte1 = vtopte((vm_offset_t)pages); + pc->pc_cmap_pte2 = vtopte((vm_offset_t)pages + PAGE_SIZE); + pc->pc_cmap_addr1 = pages; + pc->pc_cmap_addr2 = pages + PAGE_SIZE; pc->pc_qmap_addr = pages + ptoa(2); } } @@ -1038,7 +1038,7 @@ __CONCAT(PMTYPE, init)(void) TAILQ_INIT(&pv_table[i].pv_list); pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); - pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); + pv_chunkbase = kva_alloc(PAGE_SIZE * pv_maxchunks); if (pv_chunkbase == NULL) panic("pmap_init: not enough kvm for pv chunks"); pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); @@ -2060,7 +2060,7 @@ __CONCAT(PMTYPE, pinit)(pmap_t pmap) * page directory table. *** 903 LINES SKIPPED ***home | help
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69ea61f1.46085.3dc9c391>
