Date:      Fri, 13 Jun 2025 18:24:16 GMT
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 330b17e1cf5c - main - vm: remove pa_index from the machine-independent layer
Message-ID:  <202506131824.55DIOGon004145@gitrepo.freebsd.org>

The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=330b17e1cf5c2e259f99ad6ac21eda6bd92a0836

commit 330b17e1cf5c2e259f99ad6ac21eda6bd92a0836
Author:     Alan Cox <alc@FreeBSD.org>
AuthorDate: 2025-06-12 20:12:36 +0000
Commit:     Alan Cox <alc@FreeBSD.org>
CommitDate: 2025-06-13 18:23:28 +0000

    vm: remove pa_index from the machine-independent layer
    
    After the demise of vm_page_lock(), the only remaining uses of
    pa_index() are in various pmap implementations.  In many cases, e.g.,
    amd64, the pmap implementations already provided their own definitions,
    often identical to the machine-independent one.  For those that didn't
    provide one, this change adds it.
    
    Reviewed by:    kib, markj
    Differential Revision:  https://reviews.freebsd.org/D50823
---
 sys/amd64/amd64/pmap.c      | 1 -
 sys/powerpc/aim/mmu_oea64.c | 5 ++++-
 sys/riscv/riscv/pmap.c      | 3 ++-
 sys/vm/vm_page.h            | 8 --------
 4 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4ad1335329da..2962f2823596 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -342,7 +342,6 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
 #define PV_STAT(x)	do { } while (0)
 #endif
 
-#undef pa_index
 #ifdef NUMA
 #define	pa_index(pa)	({					\
 	KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,	\
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 40dd232f47c1..79cea408bb5f 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -125,6 +125,9 @@ uintptr_t moea64_get_unique_vsid(void);
 #define PV_LOCK_COUNT	MAXCPU
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 
+#define	PV_LOCK_SHIFT	21
+#define	pa_index(pa)	((pa) >> PV_LOCK_SHIFT)
+
 /*
  * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
  * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
@@ -145,7 +148,7 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 
 /* Superpage PV lock */
 
-#define	PV_LOCK_SIZE		(1<<PDRSHIFT)
+#define	PV_LOCK_SIZE		(1 << PV_LOCK_SHIFT)
 
 static __always_inline void
 moea64_sp_pv_lock(vm_paddr_t pa)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 46dea0d7dc85..f756e98335f3 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -183,12 +183,13 @@
 
 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
+#define	pa_index(pa)		((pa) >> L2_SHIFT)
 #define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
 
 #define	NPV_LIST_LOCKS	MAXCPU
 
 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
-			(&pv_list_locks[pmap_l2_pindex(pa) % NPV_LIST_LOCKS])
+			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
 
 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
 	struct rwlock **_lockp = (lockp);		\
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 062cf00b5b33..ba3f88864661 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -336,14 +336,6 @@ SLIST_HEAD(spglist, vm_page);
 extern vm_page_t bogus_page;
 #endif	/* _KERNEL */
 
-#if defined(__arm__)
-#define	PDRSHIFT	PDR_SHIFT
-#elif !defined(PDRSHIFT)
-#define PDRSHIFT	21
-#endif
-
-#define	pa_index(pa)	((pa) >> PDRSHIFT)
-
 /*
  * The vm_page's aflags are updated using atomic operations.  To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()

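As a rough illustration (not part of the commit), the sketch below shows the
pa_index() pattern that the riscv and powerpc hunks above now define locally:
a physical address is shifted right by a superpage shift to obtain a superpage
index, which is then reduced modulo the number of PV list locks to select a
lock slot, as in PHYS_TO_PV_LIST_LOCK() in the riscv hunk.  This is a
stand-alone user-space approximation under assumed constants and types
(vm_paddr_t as uint64_t, NPV_LIST_LOCKS of 64 standing in for MAXCPU), not the
kernel code itself.

	/*
	 * User-space sketch of the pa_index() -> PV lock mapping.
	 * Constants are illustrative: PV_LOCK_SHIFT of 21 matches the
	 * 2 MB superpage shift used in mmu_oea64.c; NPV_LIST_LOCKS is
	 * a stand-in for MAXCPU.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t vm_paddr_t;		/* stand-in for the kernel type */

	#define	PV_LOCK_SHIFT	21
	#define	pa_index(pa)	((pa) >> PV_LOCK_SHIFT)
	#define	NPV_LIST_LOCKS	64

	static unsigned
	pv_lock_slot(vm_paddr_t pa)
	{
		/* Same reduction as PHYS_TO_PV_LIST_LOCK() above. */
		return (pa_index(pa) % NPV_LIST_LOCKS);
	}

	int
	main(void)
	{
		vm_paddr_t pa = 0x12345678;

		printf("pa 0x%jx -> superpage index %ju -> lock slot %u\n",
		    (uintmax_t)pa, (uintmax_t)pa_index(pa), pv_lock_slot(pa));
		return (0);
	}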

