Skip site navigation (1)Skip section navigation (2)
Date:      Thu, 23 Apr 2026 18:16:21 +0000
From:      John Baldwin <jhb@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 2c6d8f15bd45 - main - sys: Permit passing pointers to VIRT_IN_DMAP and DMAP_TO_PHYS
Message-ID:  <69ea61f5.45f8f.6fb4de96@gitrepo.freebsd.org>

index | next in thread | raw e-mail

The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=2c6d8f15bd45afddb87d9a435239f4280ebd26f7

commit 2c6d8f15bd45afddb87d9a435239f4280ebd26f7
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2026-04-23 17:05:54 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2026-04-23 17:05:54 +0000

    sys: Permit passing pointers to VIRT_IN_DMAP and DMAP_TO_PHYS
    
    Add explicit uintptr_t casts to the arguments to these macros so that
    they work both with virtual addresses (e.g. vm_offset_t) and pointers.
    
    Drop no-longer-needed casts in various invocations of DMAP_TO_PHYS.
    
    Effort:         CHERI upstreaming
    Reviewed by:    kib
    Sponsored by:   AFRL, DARPA
    Pull Request:   https://github.com/freebsd/freebsd-src/pull/2068
---
 sys/amd64/amd64/pmap.c            | 33 +++++++++++++++------------------
 sys/amd64/include/vmparam.h       | 16 ++++++++++------
 sys/arm64/arm64/pmap.c            | 14 +++++++-------
 sys/arm64/include/vmparam.h       | 17 +++++++++++------
 sys/contrib/ncsw/user/env/xx.c    |  2 +-
 sys/kern/uipc_ktls.c              |  4 ++--
 sys/powerpc/aim/mmu_radix.c       | 12 ++++++------
 sys/powerpc/aim/moea64_native.c   |  2 +-
 sys/powerpc/booke/pmap_64.c       |  8 ++++----
 sys/powerpc/include/vmparam.h     |  2 +-
 sys/powerpc/powernv/opal_hmi.c    |  2 +-
 sys/powerpc/powerpc/uma_machdep.c |  2 +-
 sys/riscv/include/vmparam.h       | 17 +++++++++++------
 sys/riscv/riscv/pmap.c            |  4 ++--
 sys/vm/uma_core.c                 |  2 +-
 sys/vm/vm_page.c                  |  2 +-
 16 files changed, 75 insertions(+), 64 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 5592dc3bb683..e5f21d326cfc 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -545,7 +545,7 @@ static __inline int
 pc_to_domain(struct pv_chunk *pc)
 {
 
-	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
+	return (vm_phys_domain(DMAP_TO_PHYS(pc)));
 }
 #else
 static __inline int
@@ -4834,7 +4834,7 @@ pmap_release(pmap_t pmap)
 	KASSERT(CPU_EMPTY(&pmap->pm_active),
 	    ("releasing active pmap %p", pmap));
 
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_pmltop));
 
 	if (pmap_is_la57(pmap)) {
 		for (i = NPML5EPG / 2; i < NPML5EPG; i++)
@@ -4863,8 +4863,7 @@ pmap_release(pmap_t pmap)
 	pmap_pt_page_count_pinit(pmap, -1);
 
 	if (pmap->pm_pmltopu != NULL) {
-		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
-		    pm_pmltopu));
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_pmltopu));
 		pmap_free_pt_page(NULL, m, false);
 		pmap_pt_page_count_pinit(pmap, -1);
 	}
@@ -5349,7 +5348,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
 			PV_STAT(counter_u64_add(pc_chunk_count, -1));
 			PV_STAT(counter_u64_add(pc_chunk_frees, 1));
 			/* Entire chunk is free; return it. */
-			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 			dump_drop_page(m_pc->phys_addr);
 			mtx_lock(&pvc->pvc_lock);
 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
@@ -5450,7 +5449,7 @@ free_pv_chunk_dequeued(struct pv_chunk *pc)
 	PV_STAT(counter_u64_add(pc_chunk_frees, 1));
 	counter_u64_add(pv_page_count, -1);
 	/* entire chunk is free, return it */
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 	dump_drop_page(m->phys_addr);
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
@@ -10708,7 +10707,7 @@ retry:
 			goto retry;
 		mphys = VM_PAGE_TO_PHYS(m);
 		*pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
-		PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
+		PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde))->ref_count++;
 	} else {
 		MPASS((*pde & X86_PG_PS) == 0);
 		mphys = *pde & PG_FRAME;
@@ -10827,8 +10826,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
 			*pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
 			    X86_PG_V | X86_PG_A | pg_nx |
 			    pmap_cache_bits(kernel_pmap, mattr, true);
-			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
-			    ref_count++;
+			PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde))->ref_count++;
 			inc = NBPDR;
 		} else {
 			pte = pmap_large_map_pte(va);
@@ -10836,8 +10834,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
 			*pte = pa | pg_g | X86_PG_RW | X86_PG_V |
 			    X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
 			    mattr, false);
-			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
-			    ref_count++;
+			PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte))->ref_count++;
 			inc = PAGE_SIZE;
 		}
 	}
@@ -10905,7 +10902,7 @@ pmap_large_unmap(void *svaa, vm_size_t len)
 			    pd, len));
 			pde_store(pde, 0);
 			inc = NBPDR;
-			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
+			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde));
 			m->ref_count--;
 			if (m->ref_count == 0) {
 				*pdpe = 0;
@@ -10919,12 +10916,12 @@ pmap_large_unmap(void *svaa, vm_size_t len)
 		    (u_long)pte, *pte));
 		pte_clear(pte);
 		inc = PAGE_SIZE;
-		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte));
 		m->ref_count--;
 		if (m->ref_count == 0) {
 			*pde = 0;
 			SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
-			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
+			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde));
 			m->ref_count--;
 			if (m->ref_count == 0) {
 				*pdpe = 0;
@@ -11227,7 +11224,7 @@ pmap_pti_wire_pte(void *pte)
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte));
 	m->ref_count++;
 }
 
@@ -11237,7 +11234,7 @@ pmap_pti_unwire_pde(void *pde, bool only_ref)
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pde));
 	MPASS(only_ref || m->ref_count > 1);
 	pmap_pti_free_page(m);
 }
@@ -11249,7 +11246,7 @@ pmap_pti_unwire_pte(void *pte, vm_offset_t va)
 	pd_entry_t *pde;
 
 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pte));
 	if (pmap_pti_free_page(m)) {
 		pde = pmap_pti_pde(va);
 		MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
@@ -12344,7 +12341,7 @@ DB_SHOW_COMMAND(ptpages, pmap_ptpages)
 		}
 	} else {
 		ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
-		    (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);
+		    pmap->pm_pmltop)), NUP4ML4E, PG_V);
 	}
 }
 #endif
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index d2ac3c6648b2..ed17922642c0 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -246,8 +246,11 @@
  * vt fb startup needs to be reworked.
  */
 #define	PHYS_IN_DMAP(pa)	(dmaplimit == 0 || (pa) < dmaplimit)
-#define	VIRT_IN_DMAP(va)	\
-    ((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_low + dmaplimit)
+#define	VIRT_IN_DMAP(va) __extension__ ({				\
+	uintptr_t _va = (uintptr_t)(va);				\
+									\
+	(_va >= kva_layout.dmap_low &&					\
+	    _va < kva_layout.dmap_low + dmaplimit); })
 
 #define	PMAP_HAS_DMAP	1
 #define	PHYS_TO_DMAP(x)	__extension__ ({				\
@@ -257,10 +260,11 @@
 	(x) + kva_layout.dmap_low; })
 
 #define	DMAP_TO_PHYS(x)	__extension__ ({				\
-	KASSERT(VIRT_IN_DMAP(x),					\
-	    ("virtual address %#jx not covered by the DMAP",		\
-	    (uintmax_t)x));						\
-	(x) - kva_layout.dmap_low; })
+	uintptr_t _x = (uintptr_t)(x);					\
+									\
+	KASSERT(VIRT_IN_DMAP(_x),					\
+	    ("virtual address %p not covered by the DMAP", (void *)_x));\
+	_x - kva_layout.dmap_low; })
 
 /*
  * amd64 maps the page array into KVA so that it can be more easily
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index bc024b4053c6..8b41c4f48fb8 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -334,7 +334,7 @@ vm_offset_t kernel_vm_end = 0;
 static __inline int
 pc_to_domain(struct pv_chunk *pc)
 {
-	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
+	return (vm_phys_domain(DMAP_TO_PHYS(pc)));
 }
 #else
 static __inline int
@@ -3458,7 +3458,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 			/* Entire chunk is free; return it. */
-			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 			dump_drop_page(m_pc->phys_addr);
 			mtx_lock(&pvc->pvc_lock);
 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
@@ -3560,7 +3560,7 @@ free_pv_chunk_dequeued(struct pv_chunk *pc)
 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 	/* entire chunk is free, return it */
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 	dump_drop_page(m->phys_addr);
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
@@ -8527,7 +8527,7 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
 
 	if (tmpl1 != NULL) {
 		pmap_kenter((vm_offset_t)tmpl1, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
+		    DMAP_TO_PHYS(l1) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
 	}
@@ -8722,7 +8722,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 	 */
 	if (tmpl2 != NULL) {
 		pmap_kenter((vm_offset_t)tmpl2, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
+		    DMAP_TO_PHYS(l2) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
 	}
@@ -8799,7 +8799,7 @@ pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va)
 		if (tmpl3 == NULL)
 			return (false);
 		pmap_kenter((vm_offset_t)tmpl3, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l2c_start) & ~L3_OFFSET,
+		    DMAP_TO_PHYS(l2c_start) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l2c_start = (pd_entry_t *)(tmpl3 +
 		    ((vm_offset_t)l2c_start & PAGE_MASK));
@@ -8888,7 +8888,7 @@ pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va)
 		if (tmpl3 == NULL)
 			return (false);
 		pmap_kenter((vm_offset_t)tmpl3, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l3c_start) & ~L3_OFFSET,
+		    DMAP_TO_PHYS(l3c_start) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l3c_start = (pt_entry_t *)(tmpl3 +
 		    ((vm_offset_t)l3c_start & PAGE_MASK));
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index 781602306436..650c6f5c9225 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -256,8 +256,12 @@
 #define	PHYS_IN_DMAP(pa)	(PHYS_IN_DMAP_RANGE(pa) && \
     pmap_klookup(PHYS_TO_DMAP(pa), NULL))
 /* True if va is in the dmap range */
-#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
-    (va) < (dmap_max_addr))
+#define	VIRT_IN_DMAP(va)						\
+({									\
+	uintptr_t __va = (uintptr_t)(va);				\
+									\
+	__va >= DMAP_MIN_ADDRESS && __va < (dmap_max_addr);		\
+})
 
 #define	PMAP_HAS_DMAP	1
 #define	PHYS_TO_DMAP(pa)						\
@@ -270,10 +274,11 @@
 
 #define	DMAP_TO_PHYS(va)						\
 ({									\
-	KASSERT(VIRT_IN_DMAP(va),					\
-	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
-	    (vm_offset_t)(va)));					\
-	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
+	uintptr_t _va = (uintptr_t)(va);				\
+									\
+	KASSERT(VIRT_IN_DMAP(_va),					\
+	    ("%s: VA out of range, VA: %p", __func__, (void *)_va));	\
+	(_va - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
 })
 
 #define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
diff --git a/sys/contrib/ncsw/user/env/xx.c b/sys/contrib/ncsw/user/env/xx.c
index 0a502c64e2fb..aaf795ffa57a 100644
--- a/sys/contrib/ncsw/user/env/xx.c
+++ b/sys/contrib/ncsw/user/env/xx.c
@@ -658,7 +658,7 @@ XX_VirtToPhys(void *addr)
 
 	if (PMAP_HAS_DMAP && (vm_offset_t)addr >= DMAP_BASE_ADDRESS &&
 	    (vm_offset_t)addr <= DMAP_MAX_ADDRESS)
-		return (DMAP_TO_PHYS((vm_offset_t)addr));
+		return (DMAP_TO_PHYS(addr));
 	else
 		paddr = pmap_kextract((vm_offset_t)addr);
 
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
index 4c3a4085b8db..5f30f2046965 100644
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -461,7 +461,7 @@ ktls_buffer_release(void *arg __unused, void **store, int count)
 	int i, j;
 
 	for (i = 0; i < count; i++) {
-		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(store[i]));
 		for (j = 0; j < atop(ktls_maxlen); j++) {
 			(void)vm_page_unwire_noq(m + j);
 			vm_page_free(m + j);
@@ -2816,7 +2816,7 @@ ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
 		state->dst_iov[0].iov_base = (char *)state->cbuf +
 		    m->m_epg_1st_off;
 		state->dst_iov[0].iov_len = len;
-		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
+		state->parray[0] = DMAP_TO_PHYS(state->cbuf);
 		i = 1;
 	} else {
 		off = m->m_epg_1st_off;
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index b8be7f188cb6..8b4c8ddcc578 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -1497,7 +1497,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 			/* Entire chunk is free; return it. */
-			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 			dump_drop_page(m_pc->phys_addr);
 			mtx_lock(&pv_chunks_mutex);
 			TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
@@ -1587,7 +1587,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 	/* entire chunk is free, return it */
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 	dump_drop_page(m->phys_addr);
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
@@ -2158,7 +2158,7 @@ mmu_radix_parttab_init(void)
 	uint64_t pagetab;
 
 	mmu_parttab_init();
-	pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \
+	pagetab = RTS_SIZE | DMAP_TO_PHYS(kernel_pmap->pm_pml1) | \
 		         RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
 	mmu_parttab_update(0, pagetab, 0);
 }
@@ -2181,7 +2181,7 @@ mmu_radix_proctab_init(void)
 
 	isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
 	isa3_proctab->proctab0 =
-	    htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
+	    htobe64(RTS_SIZE | DMAP_TO_PHYS(kernel_pmap->pm_pml1) |
 		RADIX_PGD_INDEX_SHIFT);
 
 	if (powernv_enabled) {
@@ -3651,7 +3651,7 @@ radix_pgd_release(void *arg __unused, void **store, int count)
 		 * XXX selectively remove dmap and KVA entries so we don't
 		 * need to bzero
 		 */
-		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(store[i]));
 		for (int j = page_count-1; j >= 0; j--) {
 			vm_page_unwire_noq(&m[j]);
 			SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
@@ -4264,7 +4264,7 @@ mmu_radix_pinit(pmap_t pmap)
 	vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
 
 	pmap->pm_pid = pid;
-	l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
+	l1pa = DMAP_TO_PHYS(pmap->pm_pml1);
 	mmu_radix_update_proctab(pid, l1pa);
 	__asm __volatile("ptesync;isync" : : : "memory");
 
diff --git a/sys/powerpc/aim/moea64_native.c b/sys/powerpc/aim/moea64_native.c
index a3f54940ab1a..b79da6c462ac 100644
--- a/sys/powerpc/aim/moea64_native.c
+++ b/sys/powerpc/aim/moea64_native.c
@@ -626,7 +626,7 @@ moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
 	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
 		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
 		moea64_part_table[0].pagetab = htobe64(
-			(DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
+			(DMAP_TO_PHYS(moea64_pteg_table)) |
 			(uintptr_t)(flsl((moea64_pteg_count - 1) >> 11)));
 	}
 	ENABLE_TRANS(msr);
diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c
index 06c7c8cf76be..a233248da4bd 100644
--- a/sys/powerpc/booke/pmap_64.c
+++ b/sys/powerpc/booke/pmap_64.c
@@ -339,19 +339,19 @@ ptbl_unhold(pmap_t pmap, vm_offset_t va)
 	ptbl = pdir[pdir_idx];
 
 	/* decrement hold count */
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(ptbl));
 
 	if (!unhold_free_page(pmap, m))
 		return (0);
 
 	pdir[pdir_idx] = NULL;
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pdir));
 
 	if (!unhold_free_page(pmap, m))
 		return (1);
 
 	pdir_l1[pdir_l1_idx] = NULL;
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pdir_l1));
 
 	if (!unhold_free_page(pmap, m))
 		return (1);
@@ -372,7 +372,7 @@ ptbl_hold(pmap_t pmap, pte_t *ptbl)
 	KASSERT((pmap != kernel_pmap),
 		("ptbl_hold: holding kernel ptbl!"));
 
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(ptbl));
 	m->ref_count++;
 }
 
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
index 67fce74ade55..052fb2ab60af 100644
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -317,7 +317,7 @@ extern	int vm_level_0_order;
 	(x) | DMAP_BASE_ADDRESS; })
 #define DMAP_TO_PHYS(x) ({						\
 	KASSERT(hw_direct_map, ("Direct map not provided by PMAP"));	\
-	(x) &~ DMAP_BASE_ADDRESS; })
+	(uintptr_t)(x) &~ DMAP_BASE_ADDRESS; })
 
 /*
  * No non-transparent large page support in the pmap.
diff --git a/sys/powerpc/powernv/opal_hmi.c b/sys/powerpc/powernv/opal_hmi.c
index 0dc179be3750..7784e8da55da 100644
--- a/sys/powerpc/powernv/opal_hmi.c
+++ b/sys/powerpc/powernv/opal_hmi.c
@@ -89,7 +89,7 @@ opal_hmi_handler2(struct trapframe *frame)
 	int err;
 
 	*flags = 0;
-	err = opal_call(OPAL_HANDLE_HMI2, DMAP_TO_PHYS((vm_offset_t)flags));
+	err = opal_call(OPAL_HANDLE_HMI2, DMAP_TO_PHYS(flags));
 
 	if (be64toh(*flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
 		panic("TOD/TB recovery failure");
diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c
index 47ee785f4be8..923a348d35a7 100644
--- a/sys/powerpc/powerpc/uma_machdep.c
+++ b/sys/powerpc/powerpc/uma_machdep.c
@@ -80,7 +80,7 @@ uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 	vm_page_t m;
 
 	if (hw_direct_map)
-		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem));
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(mem));
 	else {
 		m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem));
 		pmap_kremove((vm_offset_t)mem);
diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h
index c750791bb280..b9f4b9fc802f 100644
--- a/sys/riscv/include/vmparam.h
+++ b/sys/riscv/include/vmparam.h
@@ -171,8 +171,12 @@
 #define	PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
     (pa) < DMAP_MAX_PHYSADDR)
 /* True if va is in the dmap range */
-#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
-    (va) < (dmap_max_addr))
+#define	VIRT_IN_DMAP(va)						\
+({									\
+	uintptr_t __va = (uintptr_t)(va);				\
+									\
+	__va >= DMAP_MIN_ADDRESS && __va < (dmap_max_addr);		\
+})
 
 #define	PMAP_HAS_DMAP	1
 #define	PHYS_TO_DMAP(pa)						\
@@ -185,10 +189,11 @@
 
 #define	DMAP_TO_PHYS(va)						\
 ({									\
-	KASSERT(VIRT_IN_DMAP(va),					\
-	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
-	    (vm_offset_t)(va)));					\
-	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
+	uintptr_t _va = (uintptr_t)(va);				\
+									\
+	KASSERT(VIRT_IN_DMAP(_va),					\
+	    ("%s: VA out of range, VA: %p", __func__, (void *)_va));	\
+	(_va - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
 })
 
 #define	VM_MIN_USER_ADDRESS		(0x0000000000000000UL)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 050dc941b364..bae106dcbfbc 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -1924,7 +1924,7 @@ pmap_release(pmap_t pmap)
 
 finish:
 	npages = pmap->pm_stage == PM_STAGE2 ? 4 : 1;
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_top));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pmap->pm_top));
 	for (i = 0; i < npages; i++) {
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
@@ -2132,7 +2132,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 	/* entire chunk is free, return it */
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(pc));
 	dump_drop_page(m->phys_addr);
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index e6e872232f31..5342d8ccd217 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2168,7 +2168,7 @@ uma_small_free(void *mem, vm_size_t size, uint8_t flags)
 	vm_page_t m;
 	vm_paddr_t pa;
 
-	pa = DMAP_TO_PHYS((vm_offset_t)mem);
+	pa = DMAP_TO_PHYS(mem);
 	dump_drop_page(pa);
 	m = PHYS_TO_VM_PAGE(pa);
 	vm_page_unwire_noq(m);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2def369d739f..bdea77bfabf0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -658,7 +658,7 @@ vm_page_startup(vm_offset_t vaddr)
 	 * included in a crash dump.  Since the message buffer is accessed
 	 * through the direct map, they are not automatically included.
 	 */
-	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
+	pa = DMAP_TO_PHYS(msgbufp->msg_ptr);
 	last_pa = pa + round_page(msgbufsize);
 	while (pa < last_pa) {
 		dump_add_page(pa);


home | help

Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69ea61f5.45f8f.6fb4de96>