Date: Thu, 30 Apr 2026 07:18:29 +0000 From: Jean-Sébastien Pédron <dumbbell@FreeBSD.org> To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org Subject: git: 783d018cf954 - main - linuxkpi: Add `struct xa_limit` support to xarray Message-ID: <69f30245.3699a.5ff2d68e@gitrepo.freebsd.org>
index | next in thread | raw e-mail
The branch main has been updated by dumbbell: URL: https://cgit.FreeBSD.org/src/commit/?id=783d018cf954f99032a0a4f655af8916024598a8 commit 783d018cf954f99032a0a4f655af8916024598a8 Author: Jean-Sébastien Pédron <dumbbell@FreeBSD.org> AuthorDate: 2026-04-13 21:39:22 +0000 Commit: Jean-Sébastien Pédron <dumbbell@FreeBSD.org> CommitDate: 2026-04-30 07:05:01 +0000 linuxkpi: Add `struct xa_limit` support to xarray The `xa_alloc*()` functions family takes a `struct xa_limit` to describe the range of IDs the caller wants to allocate. We were using a single mask to qualify a maximum ID only. This commit changes that to use the same `struct xa_limit`. The logic did not change, except it now supports a minimum ID as well. The definition of `XA_LIMIT()` macro is adapted, as well as the definitions of `xa_limit_*` (only `xa_limit_32b` existed, the other two are added with this commit). The DRM generic code started to use this `struct xa_limit` in Linux 6.12. Reviewed by: bz Sponsored by: The FreeBSD Foundation Differential Revision: https://reviews.freebsd.org/D56445 --- sys/compat/linuxkpi/common/include/linux/xarray.h | 26 ++++++++------ sys/compat/linuxkpi/common/src/linux_xarray.c | 41 ++++++++++------------- 2 files changed, 34 insertions(+), 33 deletions(-) diff --git a/sys/compat/linuxkpi/common/include/linux/xarray.h b/sys/compat/linuxkpi/common/include/linux/xarray.h index fba36eea0ab5..e6511130d50c 100644 --- a/sys/compat/linuxkpi/common/include/linux/xarray.h +++ b/sys/compat/linuxkpi/common/include/linux/xarray.h @@ -34,9 +34,6 @@ #include <sys/lock.h> #include <sys/mutex.h> -#define XA_LIMIT(min, max) \ - ({ CTASSERT((min) == 0); (uint32_t)(max); }) - #define XA_FLAGS_ALLOC (1U << 0) #define XA_FLAGS_LOCK_IRQ (1U << 1) #define XA_FLAGS_ALLOC1 (1U << 2) @@ -47,8 +44,6 @@ #define xa_is_err(x) \ IS_ERR(x) -#define xa_limit_32b XA_LIMIT(0, 0xFFFFFFFF) - #define XA_ASSERT_LOCKED(xa) mtx_assert(&(xa)->xa_lock, MA_OWNED) #define xa_lock(xa) mtx_lock(&(xa)->xa_lock) 
#define xa_unlock(xa) mtx_unlock(&(xa)->xa_lock) @@ -59,15 +54,26 @@ struct xarray { uint32_t xa_flags; /* see XA_FLAGS_XXX */ }; +struct xa_limit { + uint32_t max; + uint32_t min; +}; + +#define XA_LIMIT(min_, max_) (struct xa_limit){ .min = (min_), .max = (max_) } + +#define xa_limit_16b XA_LIMIT(0, USHRT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) + /* * Extensible arrays API implemented as a wrapper * around the radix tree implementation. */ void *xa_erase(struct xarray *, uint32_t); void *xa_load(struct xarray *, uint32_t); -int xa_alloc(struct xarray *, uint32_t *, void *, uint32_t, gfp_t); -int xa_alloc_cyclic(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t); -int xa_alloc_cyclic_irq(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t); +int xa_alloc(struct xarray *, uint32_t *, void *, struct xa_limit, gfp_t); +int xa_alloc_cyclic(struct xarray *, uint32_t *, void *, struct xa_limit, uint32_t *, gfp_t); +int xa_alloc_cyclic_irq(struct xarray *, uint32_t *, void *, struct xa_limit, uint32_t *, gfp_t); int xa_insert(struct xarray *, uint32_t, void *, gfp_t); void *xa_store(struct xarray *, uint32_t, void *, gfp_t); void xa_init_flags(struct xarray *, uint32_t); @@ -83,8 +89,8 @@ void *xa_next(struct xarray *, unsigned long *, bool); * Unlocked version of functions above. 
*/ void *__xa_erase(struct xarray *, uint32_t); -int __xa_alloc(struct xarray *, uint32_t *, void *, uint32_t, gfp_t); -int __xa_alloc_cyclic(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t); +int __xa_alloc(struct xarray *, uint32_t *, void *, struct xa_limit, gfp_t); +int __xa_alloc_cyclic(struct xarray *, uint32_t *, void *, struct xa_limit, uint32_t *, gfp_t); int __xa_insert(struct xarray *, uint32_t, void *, gfp_t); void *__xa_store(struct xarray *, uint32_t, void *, gfp_t); bool __xa_empty(struct xarray *); diff --git a/sys/compat/linuxkpi/common/src/linux_xarray.c b/sys/compat/linuxkpi/common/src/linux_xarray.c index 8caefbaf7e50..4a305f8d58b6 100644 --- a/sys/compat/linuxkpi/common/src/linux_xarray.c +++ b/sys/compat/linuxkpi/common/src/linux_xarray.c @@ -115,19 +115,16 @@ xa_vm_wait_locked(struct xarray *xa) * available to complete the radix tree insertion. */ int -__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp) +__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, gfp_t gfp) { int retval; XA_ASSERT_LOCKED(xa); - /* mask should allow to allocate at least one item */ - MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0)); - - /* mask can be any power of two value minus one */ - MPASS((mask & (mask + 1)) == 0); + MPASS(limit.max > limit.min); *pindex = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 
1 : 0; + *pindex = MAX(*pindex, limit.min); if (ptr == NULL) ptr = NULL_VALUE; retry: @@ -135,7 +132,7 @@ retry: switch (retval) { case -EEXIST: - if (likely(*pindex != mask)) { + if (likely(*pindex < limit.max)) { (*pindex)++; goto retry; } @@ -154,7 +151,7 @@ retry: } int -xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp) +xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, gfp_t gfp) { int retval; @@ -162,7 +159,7 @@ xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gf ptr = NULL_VALUE; xa_lock(xa); - retval = __xa_alloc(xa, pindex, ptr, mask, gfp); + retval = __xa_alloc(xa, pindex, ptr, limit, gfp); xa_unlock(xa); return (retval); @@ -175,7 +172,7 @@ xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gf * beginning of the array. If the xarray is full -ENOMEM is returned. */ int -__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, +__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, uint32_t *pnext_index, gfp_t gfp) { int retval; @@ -183,13 +180,10 @@ __xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, XA_ASSERT_LOCKED(xa); - /* mask should allow to allocate at least one item */ - MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0)); - - /* mask can be any power of two value minus one */ - MPASS((mask & (mask + 1)) == 0); + MPASS(limit.max > limit.min); *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 
1 : 0; + *pnext_index = MAX(*pnext_index, limit.min); if (ptr == NULL) ptr = NULL_VALUE; retry: @@ -197,14 +191,15 @@ retry: switch (retval) { case -EEXIST: - if (unlikely(*pnext_index == mask) && !timeout--) { + if (unlikely(*pnext_index == limit.max) && !timeout--) { retval = -ENOMEM; break; } (*pnext_index)++; - (*pnext_index) &= mask; - if (*pnext_index == 0 && (xa->xa_flags & XA_FLAGS_ALLOC1) != 0) - (*pnext_index)++; + if (*pnext_index > limit.max) { + *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0; + *pnext_index = MAX(*pnext_index, limit.min); + } goto retry; case -ENOMEM: if (likely(gfp & M_WAITOK)) { @@ -221,13 +216,13 @@ retry: } int -xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, +xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, uint32_t *pnext_index, gfp_t gfp) { int retval; xa_lock(xa); - retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp); + retval = __xa_alloc_cyclic(xa, pindex, ptr, limit, pnext_index, gfp); xa_unlock(xa); return (retval); @@ -235,12 +230,12 @@ xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, int xa_alloc_cyclic_irq(struct xarray *xa, uint32_t *pindex, void *ptr, - uint32_t mask, uint32_t *pnext_index, gfp_t gfp) + struct xa_limit limit, uint32_t *pnext_index, gfp_t gfp) { int retval; xa_lock_irq(xa); - retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp); + retval = __xa_alloc_cyclic(xa, pindex, ptr, limit, pnext_index, gfp); xa_unlock_irq(xa); return (retval);home | help
Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69f30245.3699a.5ff2d68e>
