Date:      Thu, 23 Apr 2026 18:16:09 +0000
From:      John Baldwin <jhb@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: ef159bdd54ca - main - riscv: Switch the address argument to cpu_dcache_* to a pointer
Message-ID:  <69ea61e9.45f8b.31c769c@gitrepo.freebsd.org>

The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=ef159bdd54cada6db8b0af6380fa9269d0f26a04

commit ef159bdd54cada6db8b0af6380fa9269d0f26a04
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2026-04-23 17:05:53 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2026-04-23 17:05:53 +0000

    riscv: Switch the address argument to cpu_dcache_* to a pointer
    
    No functional change, but this is friendlier for CHERI.
    
    Effort:         CHERI upstreaming
    Reviewed by:    kib
    Sponsored by:   AFRL, DARPA
    Pull Request:   https://github.com/freebsd/freebsd-src/pull/2068
---
 sys/riscv/include/cpufunc.h      |  8 ++++----
 sys/riscv/riscv/busdma_bounce.c  | 18 +++++++++---------
 sys/riscv/riscv/cbo.c            | 27 +++++++++++++++------------
 sys/riscv/riscv/pmap.c           |  2 +-
 sys/riscv/sifive/sifive_ccache.c |  4 ++--
 sys/riscv/thead/thead.c          | 18 +++++++++---------
 6 files changed, 40 insertions(+), 37 deletions(-)
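
For readers following the CHERI angle: vm_offset_t is a plain integer type, so
the old interface forced callers to cast pointers to integers and the cache ops
to cast them back, a round trip that discards capability metadata on CHERI
targets. A minimal sketch of the caller-facing difference follows; the caller
and its argument names are illustrative, not taken from this commit:

#include <sys/param.h>
#include <machine/cpufunc.h>

/* Hypothetical caller; buf and len are illustrative names. */
static void
example_writeback(void *buf, size_t len)
{
	/*
	 * Old interface: the pointer had to round-trip through the
	 * integer type vm_offset_t, e.g.:
	 *
	 *	cpu_dcache_wb_range((vm_offset_t)buf, len);
	 */

	/*
	 * New interface: the pointer is passed through unchanged, so
	 * on CHERI a capability keeps its bounds and permissions.
	 */
	cpu_dcache_wb_range(buf, len);
}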

diff --git a/sys/riscv/include/cpufunc.h b/sys/riscv/include/cpufunc.h
index c39f17131eb7..e7bfeeb0cf59 100644
--- a/sys/riscv/include/cpufunc.h
+++ b/sys/riscv/include/cpufunc.h
@@ -135,7 +135,7 @@ hfence_gvma(void)
 
 extern int64_t dcache_line_size;
 
-typedef void (*cache_op_t)(vm_offset_t start, vm_size_t size);
+typedef void (*cache_op_t)(void *start, size_t size);
 
 struct riscv_cache_ops {
 	cache_op_t dcache_wbinv_range;
@@ -146,21 +146,21 @@ struct riscv_cache_ops {
 extern struct riscv_cache_ops cache_ops;
 
 static __inline void
-cpu_dcache_wbinv_range(vm_offset_t addr, vm_size_t size)
+cpu_dcache_wbinv_range(void *addr, size_t size)
 {
 	if (cache_ops.dcache_wbinv_range != NULL)
 		cache_ops.dcache_wbinv_range(addr, size);
 }
 
 static __inline void
-cpu_dcache_inv_range(vm_offset_t addr, vm_size_t size)
+cpu_dcache_inv_range(void *addr, size_t size)
 {
 	if (cache_ops.dcache_inv_range != NULL)
 		cache_ops.dcache_inv_range(addr, size);
 }
 
 static __inline void
-cpu_dcache_wb_range(vm_offset_t addr, vm_size_t size)
+cpu_dcache_wb_range(void *addr, size_t size)
 {
 	if (cache_ops.dcache_wb_range != NULL)
 		cache_ops.dcache_wb_range(addr, size);
diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c
index 452cead3c6b6..f7569d8f58b4 100644
--- a/sys/riscv/riscv/busdma_bounce.c
+++ b/sys/riscv/riscv/busdma_bounce.c
@@ -750,15 +750,15 @@ bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 }
 
 static void
-dma_preread_safe(vm_offset_t va, vm_size_t size)
+dma_preread_safe(char *va, size_t size)
 {
 	/*
 	 * Write back any partial cachelines immediately before and
 	 * after the DMA region.
 	 */
-	if (va & (dcache_line_size - 1))
+	if (!__is_aligned(va, dcache_line_size))
 		cpu_dcache_wb_range(va, 1);
-	if ((va + size) & (dcache_line_size - 1))
+	if (!__is_aligned(va + size, dcache_line_size))
 		cpu_dcache_wb_range(va + size, 1);
 
 	cpu_dcache_inv_range(va, size);
@@ -795,7 +795,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 		switch (op) {
 		case BUS_DMASYNC_PREWRITE:
 		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
-			cpu_dcache_wb_range((vm_offset_t)va, len);
+			cpu_dcache_wb_range(va, len);
 			break;
 		case BUS_DMASYNC_PREREAD:
 			/*
@@ -808,11 +808,11 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 			 * misalignment.  Buffers which are not mbufs bounce if
 			 * they are not aligned to a cacheline.
 			 */
-			dma_preread_safe((vm_offset_t)va, len);
+			dma_preread_safe(va, len);
 			break;
 		case BUS_DMASYNC_POSTREAD:
 		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
-			cpu_dcache_inv_range((vm_offset_t)va, len);
+			cpu_dcache_inv_range(va, len);
 			break;
 		default:
 			panic("unsupported combination of sync operations: "
@@ -861,7 +861,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 				if (tempvaddr != NULL)
 					pmap_quick_remove_page(tempvaddr);
 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
-					cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
+					cpu_dcache_wb_range(bpage->vaddr,
 					    bpage->datacount);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -869,7 +869,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		} else if ((op & BUS_DMASYNC_PREREAD) != 0) {
 			while (bpage != NULL) {
 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
-					cpu_dcache_wbinv_range((vm_offset_t)bpage->vaddr,
+					cpu_dcache_wbinv_range(bpage->vaddr,
 					    bpage->datacount);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -878,7 +878,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		if ((op & BUS_DMASYNC_POSTREAD) != 0) {
 			while (bpage != NULL) {
 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
-					cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
+					cpu_dcache_inv_range(bpage->vaddr,
 					    bpage->datacount);
 				tempvaddr = NULL;
 				datavaddr = bpage->datavaddr;
diff --git a/sys/riscv/riscv/cbo.c b/sys/riscv/riscv/cbo.c
index 9b8891c514af..1684a61951cb 100644
--- a/sys/riscv/riscv/cbo.c
+++ b/sys/riscv/riscv/cbo.c
@@ -33,25 +33,26 @@
 #include <machine/cbo.h>
 
 static void
-cbo_zicbom_cpu_dcache_wbinv_range(vm_offset_t va, vm_size_t len)
+cbo_zicbom_cpu_dcache_wbinv_range(void *va, vm_size_t len)
 {
-	vm_offset_t addr;
+	char *addr, *end;
 
 	/*
 	 * A flush operation atomically performs a clean operation followed by
 	 * an invalidate operation.
 	 */
 
-	va &= ~(dcache_line_size - 1);
-	for (addr = va; addr < va + len; addr += dcache_line_size)
+	end = (char *)va + len;
+	va = __align_down(va, dcache_line_size);
+	for (addr = va; addr < end; addr += dcache_line_size)
 		__asm __volatile(".option push; .option arch, +zicbom\n"
 				 "cbo.flush (%0); .option pop\n" :: "r"(addr));
 }
 
 static void
-cbo_zicbom_cpu_dcache_inv_range(vm_offset_t va, vm_size_t len)
+cbo_zicbom_cpu_dcache_inv_range(void *va, vm_size_t len)
 {
-	vm_offset_t addr;
+	char *addr, *end;
 
 	/*
 	 * An invalidate operation makes data from store operations performed by
@@ -60,16 +61,17 @@ cbo_zicbom_cpu_dcache_inv_range(vm_offset_t va, vm_size_t len)
 	 * block from the set of coherent caches up to that point.
 	 */
 
-	va &= ~(dcache_line_size - 1);
-	for (addr = va; addr < va + len; addr += dcache_line_size)
+	end = (char *)va + len;
+	va = __align_down(va, dcache_line_size);
+	for (addr = va; addr < end; addr += dcache_line_size)
 		__asm __volatile(".option push; .option arch, +zicbom\n"
 				 "cbo.inval (%0); .option pop\n" :: "r"(addr));
 }
 
 static void
-cbo_zicbom_cpu_dcache_wb_range(vm_offset_t va, vm_size_t len)
+cbo_zicbom_cpu_dcache_wb_range(void *va, vm_size_t len)
 {
-	vm_offset_t addr;
+	char *addr, *end;
 
 	/*
 	 * A clean operation makes data from store operations performed by the
@@ -80,8 +82,9 @@ cbo_zicbom_cpu_dcache_wb_range(vm_offset_t va, vm_size_t len)
 	 * previous invalidate, clean, or flush operation on the cache block.
 	 */
 
-	va &= ~(dcache_line_size - 1);
-	for (addr = va; addr < va + len; addr += dcache_line_size)
+	end = (char *)va + len;
+	va = __align_down(va, dcache_line_size);
+	for (addr = va; addr < end; addr += dcache_line_size)
 		__asm __volatile(".option push; .option arch, +zicbom\n"
 				 "cbo.clean (%0); .option pop\n" :: "r"(addr));
 }
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 55f820494ca0..dfa341bd4bd6 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -5121,7 +5121,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 	if (anychanged) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
 		if (mode == VM_MEMATTR_UNCACHEABLE)
-			cpu_dcache_wbinv_range(base, size);
+			cpu_dcache_wbinv_range((void *)base, size);
 	}
 
 	return (error);
diff --git a/sys/riscv/sifive/sifive_ccache.c b/sys/riscv/sifive/sifive_ccache.c
index 9006d02aa85e..afa35e56de72 100644
--- a/sys/riscv/sifive/sifive_ccache.c
+++ b/sys/riscv/sifive/sifive_ccache.c
@@ -79,7 +79,7 @@ static struct resource_spec ccache_spec[] = {
  * Non-standard EIC7700 cache-flushing routine.
  */
 static void
-ccache_flush_range(vm_offset_t start, size_t len)
+ccache_flush_range(void *start, size_t len)
 {
 	vm_offset_t paddr;
 	vm_offset_t sva;
@@ -91,7 +91,7 @@ ccache_flush_range(vm_offset_t start, size_t len)
 
 	mb();
 
-	for (sva = start; len > 0;) {
+	for (sva = (vm_offset_t)start; len > 0;) {
 		paddr = pmap_kextract(sva);
 		step = min(PAGE_SIZE - (paddr & PAGE_MASK), len);
 		for (line = rounddown2(paddr, SIFIVE_CCACHE_LINE_SIZE);
diff --git a/sys/riscv/thead/thead.c b/sys/riscv/thead/thead.c
index c72f4f1312e0..19b28aecdf7b 100644
--- a/sys/riscv/thead/thead.c
+++ b/sys/riscv/thead/thead.c
@@ -53,11 +53,11 @@ bool has_errata_thead_pbmt = false;
 #define	THEAD_DCACHE_SIZE	64
 
 static void
-thead_cpu_dcache_wbinv_range(vm_offset_t va, vm_size_t len)
+thead_cpu_dcache_wbinv_range(void *va, size_t len)
 {
-	register vm_offset_t t0 __asm("t0") = rounddown(va, dcache_line_size);
+	register char *t0 __asm("t0") = __align_down(va, dcache_line_size);
 
-	for (; t0 < va + len; t0 += dcache_line_size) {
+	for (; t0 < (char *)va + len; t0 += dcache_line_size) {
 		__asm __volatile(THEAD_DCACHE_CIVA
 		                 :: "r" (t0) : "memory");
 	}
@@ -65,11 +65,11 @@ thead_cpu_dcache_wbinv_range(vm_offset_t va, vm_size_t len)
 }
 
 static void
-thead_cpu_dcache_inv_range(vm_offset_t va, vm_size_t len)
+thead_cpu_dcache_inv_range(void *va, size_t len)
 {
-	register vm_offset_t t0 __asm("t0") = rounddown(va, dcache_line_size);
+	register char *t0 __asm("t0") = __align_down(va, dcache_line_size);
 
-	for (; t0 < va + len; t0 += dcache_line_size) {
+	for (; t0 < (char *)va + len; t0 += dcache_line_size) {
 		__asm __volatile(THEAD_DCACHE_IVA
 				 :: "r" (t0) : "memory");
 	}
@@ -77,11 +77,11 @@ thead_cpu_dcache_inv_range(vm_offset_t va, vm_size_t len)
 }
 
 static void
-thead_cpu_dcache_wb_range(vm_offset_t va, vm_size_t len)
+thead_cpu_dcache_wb_range(void *va, size_t len)
 {
-	register vm_offset_t t0 __asm("t0") = rounddown(va, dcache_line_size);
+	register char *t0 __asm("t0") = __align_down(va, dcache_line_size);
 
-	for (; t0 < va + len; t0 += dcache_line_size) {
+	for (; t0 < (char *)va + len; t0 += dcache_line_size) {
 		__asm __volatile(THEAD_DCACHE_CVA
 				 :: "r" (t0) : "memory");
 	}
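
The cbo.c and thead.c hunks above also replace open-coded mask arithmetic
(va &= ~(dcache_line_size - 1)) with the __align_down()/__is_aligned() macros,
which expand to compiler builtins that keep pointer provenance intact on CHERI.
A standalone sketch of the resulting loop shape, using a plain-C fallback for
the alignment macro (the fallback and flush_line() are illustrative stand-ins,
not the kernel definitions):

#include <stddef.h>
#include <stdint.h>

/*
 * Plain-C stand-in for the kernel's __align_down(); the real macro
 * expands to a compiler builtin (__builtin_align_down) where the
 * compiler provides one, which preserves provenance on CHERI.
 */
#define align_down(p, n)						\
	((char *)((uintptr_t)(p) & ~((uintptr_t)(n) - 1)))

static const size_t dcache_line_size = 64;	/* example value only */

/* Hypothetical stand-in for a per-line cache maintenance instruction. */
static void
flush_line(void *addr)
{
	(void)addr;
}

static void
dcache_flush_range(void *va, size_t len)
{
	char *addr, *end;

	/* Visit every cache line overlapping [va, va + len). */
	end = (char *)va + len;
	for (addr = align_down(va, dcache_line_size); addr < end;
	    addr += dcache_line_size)
		flush_line(addr);
}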

