Date: Wed, 7 May 2025 21:44:35 GMT
From: Doug Moore <dougm@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: 4c65b51f44af - main - vm_page: drop mpred param from insert_lookup
Message-ID: <202505072144.547LiZR0010250@gitrepo.freebsd.org>
The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=4c65b51f44af718bd3fcc1c77d1f771159a3d8bd

commit 4c65b51f44af718bd3fcc1c77d1f771159a3d8bd
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-05-07 21:40:51 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-05-07 21:40:51 +0000

    vm_page: drop mpred param from insert_lookup

    There is no longer a sorted page list maintained by a vm_object, so
    there is no longer a need for the mpred parameter to
    vm_page_insert_lookup(). Dropping that parameter creates new functions
    that also don't need that parameter. Drop all those parameters.

    Some of the functions have names ending in "_after", where "after"
    refers to the mpred parameter that is being dropped. So that those
    names make sense, change "_after" to "_iter" in all of them, because
    they all take pctrie_iter parameters. Make those parameters last in
    parameter lists, as "mpred" was before.

    Reviewed by:	alc, markj, kib
    Differential Revision:	https://reviews.freebsd.org/D50199
---
 sys/kern/uipc_shm.c |   4 +-
 sys/vm/phys_pager.c |   4 +-
 sys/vm/swap_pager.c |  19 ++++----
 sys/vm/vm_fault.c   |  16 +++----
 sys/vm/vm_glue.c    |  12 +++--
 sys/vm/vm_kern.c    |   9 ++--
 sys/vm/vm_object.c  |  17 +++----
 sys/vm/vm_page.c    | 126 ++++++++++++++++++++--------------------------------
 sys/vm/vm_page.h    |  17 ++++---
 sys/vm/vm_radix.h   |  20 --------
 sys/vm/vm_reserv.c  |  18 ++++----
 sys/vm/vm_reserv.h  |  13 +++---
 12 files changed, 104 insertions(+), 171 deletions(-)

diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 28e90b73aa8d..6f83b875a6b6 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -232,8 +232,8 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
 	 * type object.
 	 */
-	rv = vm_page_grab_valid_iter(&m, obj, &pages, idx,
-	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
+	rv = vm_page_grab_valid_iter(&m, obj, idx,
+	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY, &pages);
 	if (rv != VM_PAGER_OK) {
 		VM_OBJECT_WUNLOCK(obj);
 		if (bootverbose) {
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index ee39221402fa..3e6766e6f9e9 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -251,8 +251,8 @@ default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
 	for (i = base; i <= end; i++) {
 		ahead = MIN(end - i, PHYSALLOC);
-		m = vm_page_grab_iter(object, &pages, i,
-		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
+		m = vm_page_grab_iter(object, i,
+		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead), &pages);
 		if (!vm_page_all_valid(m))
 			vm_page_zero_invalid(m, TRUE);
 		KASSERT(m->dirty == 0,
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 38203e4cd0af..86b75a2d7989 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1202,8 +1202,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 *	store exists before and after the requested page.
 */
 static boolean_t
-swp_pager_haspage_iter(struct pctrie_iter *blks, vm_pindex_t pindex,
-    int *before, int *after)
+swp_pager_haspage_iter(vm_pindex_t pindex, int *before, int *after,
+    struct pctrie_iter *blks)
 {
 	daddr_t blk, blk0;
 	int i;
@@ -1265,7 +1265,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 	struct pctrie_iter blks;

 	swblk_iter_init_only(&blks, object);
-	return (swp_pager_haspage_iter(&blks, pindex, before, after));
+	return (swp_pager_haspage_iter(pindex, before, after, &blks));
 }

 static void
@@ -1366,7 +1366,7 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
 	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: object not swappable", __func__));
 	pindex = ma[0]->pindex;
-	if (!swp_pager_haspage_iter(blks, pindex, &rbehind, &rahead)) {
+	if (!swp_pager_haspage_iter(pindex, &rbehind, &rahead, blks)) {
 		VM_OBJECT_WUNLOCK(object);
 		uma_zfree(swrbuf_zone, bp);
 		return (VM_PAGER_FAIL);
@@ -1935,11 +1935,9 @@ swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object,
 			if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
 				break;
 		} else {
-			m = vm_radix_iter_lookup_lt(&pages,
-			    blks.index + i);
-			m = vm_page_alloc_after(
-			    object, &pages, blks.index + i,
-			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL, m);
+			m = vm_page_alloc_iter(object, blks.index + i,
+			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL,
+			    &pages);
 			if (m == NULL)
 				break;
 		}
@@ -2593,8 +2591,7 @@ swap_pager_scan_all_shadowed(vm_object_t object)
 		 * required to clear valid and initiate paging.
 		 */
 		if ((pp == NULL || vm_page_none_valid(pp)) &&
-		    !swp_pager_haspage_iter(&blks, new_pindex, NULL,
-		    NULL))
+		    !swp_pager_haspage_iter(new_pindex, NULL, NULL, &blks))
 			break;
 		if (pi == pv)
 			vm_page_xunbusy(p);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 36e6e7d50513..21584abacfa3 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1291,9 +1291,8 @@ vm_fault_allocate(struct faultstate *fs, struct pctrie_iter *pages)
 			vm_fault_unlock_and_deallocate(fs);
 			return (FAULT_FAILURE);
 		}
-		fs->m = vm_page_alloc_after(fs->object, pages, fs->pindex,
-		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0,
-		    vm_radix_iter_lookup_lt(pages, fs->pindex));
+		fs->m = vm_page_alloc_iter(fs->object, fs->pindex,
+		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0, pages);
 	}
 	if (fs->m == NULL) {
 		if (vm_fault_allocate_oom(fs))
@@ -2103,7 +2102,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
 	vm_pindex_t dst_pindex, pindex, src_pindex;
 	vm_prot_t access, prot;
 	vm_offset_t vaddr;
-	vm_page_t dst_m, mpred;
+	vm_page_t dst_m;
 	vm_page_t src_m;
 	bool upgrade;

@@ -2176,11 +2175,9 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
 	 * regardless of whether they can be written.
 	 */
 	vm_page_iter_init(&pages, dst_object);
-	mpred = (src_object == dst_object) ?
-	    vm_page_mpred(src_object, src_pindex) : NULL;
 	for (vaddr = dst_entry->start, dst_pindex = 0; vaddr < dst_entry->end;
-	    vaddr += PAGE_SIZE, dst_pindex++, mpred = dst_m) {
+	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
 		/*
 		 * Find the page in the source object, and copy it in.
@@ -2220,15 +2217,14 @@ again:
 		 */
 		pindex = (src_object == dst_object ?
 		    src_pindex : 0) + dst_pindex;
-		dst_m = vm_page_alloc_after(dst_object, &pages, pindex,
-		    VM_ALLOC_NORMAL, mpred);
+		dst_m = vm_page_alloc_iter(dst_object, pindex,
+		    VM_ALLOC_NORMAL, &pages);
 		if (dst_m == NULL) {
 			VM_OBJECT_WUNLOCK(dst_object);
 			VM_OBJECT_RUNLOCK(object);
 			vm_wait(dst_object);
 			VM_OBJECT_WLOCK(dst_object);
 			pctrie_iter_reset(&pages);
-			mpred = vm_radix_iter_lookup_lt(&pages, pindex);
 			goto again;
 		}

diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index c441b8b3155f..c2a032b24000 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -615,7 +615,7 @@ vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
 	struct pctrie_iter pages;
 	vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
 	vm_pindex_t pindex;
-	vm_page_t m, mpred;
+	vm_page_t m;
 	int n;

 	pindex = vm_kstack_pindex(ks, npages);
@@ -623,14 +623,12 @@ vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
 	vm_page_iter_init(&pages, obj);
 	VM_OBJECT_WLOCK(obj);
 	for (n = 0; n < npages; ma[n++] = m) {
-		m = vm_page_grab_iter(obj, &pages, pindex + n,
-		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);
+		m = vm_page_grab_iter(obj, pindex + n,
+		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED, &pages);
 		if (m != NULL)
 			continue;
-		mpred = (n > 0) ? ma[n - 1] :
-		    vm_radix_iter_lookup_lt(&pages, pindex);
-		m = vm_page_alloc_domain_after(obj, &pages, pindex + n,
-		    domain, req_class | VM_ALLOC_WIRED, mpred);
+		m = vm_page_alloc_domain_iter(obj, pindex + n,
+		    domain, req_class | VM_ALLOC_WIRED, &pages);
 		if (m != NULL)
 			continue;
 		for (int i = 0; i < n; i++) {
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index d13dfb1bc953..875c22d27628 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -532,7 +532,7 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 {
 	struct pctrie_iter pages;
 	vm_offset_t offset, i;
-	vm_page_t m, mpred;
+	vm_page_t m;
 	vm_prot_t prot;
 	int pflags;

@@ -550,10 +550,9 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
retry:
-	mpred = vm_radix_iter_lookup_lt(&pages, atop(offset + i));
-	for (; i < size; i += PAGE_SIZE, mpred = m) {
-		m = vm_page_alloc_domain_after(object, &pages, atop(offset + i),
-		    domain, pflags, mpred);
+	for (; i < size; i += PAGE_SIZE) {
+		m = vm_page_alloc_domain_iter(object, atop(offset + i),
+		    domain, pflags, &pages);

 		/*
 		 * Ran out of space, free everything up and return. Don't need
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index bf6867d4ffcc..b885b3eda5c1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -233,7 +233,6 @@ static void
 _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
     vm_object_t object, void *handle)
 {
-
 	LIST_INIT(&object->shadow_head);

 	object->type = type;
@@ -2137,8 +2136,8 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	vm_page_iter_init(&pages, object);
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	for (pindex = start; pindex < end; pindex++) {
-		rv = vm_page_grab_valid_iter(&m, object, &pages, pindex,
-		    VM_ALLOC_NORMAL);
+		rv = vm_page_grab_valid_iter(&m, object, pindex,
+		    VM_ALLOC_NORMAL, &pages);
 		if (rv != VM_PAGER_OK)
 			break;

@@ -2289,10 +2288,9 @@ vm_object_prepare_buf_pages(vm_object_t object, vm_page_t *ma_dst, int count,
 		mpred = vm_radix_iter_lookup_lt(&pages, pindex);
 		*rbehind = MIN(*rbehind,
 		    pindex - (mpred != NULL ? mpred->pindex + 1 : 0));
-		/* Stepping backward from pindex, mpred doesn't change. */
 		for (int i = 0; i < *rbehind; i++) {
-			m = vm_page_alloc_after(object, &pages, pindex - i - 1,
-			    VM_ALLOC_NORMAL, mpred);
+			m = vm_page_alloc_iter(object, pindex - i - 1,
+			    VM_ALLOC_NORMAL, &pages);
 			if (m == NULL) {
 				/* Shift the array. */
 				for (int j = 0; j < i; j++)
@@ -2312,15 +2310,14 @@ vm_object_prepare_buf_pages(vm_object_t object, vm_page_t *ma_dst, int count,
 		msucc = vm_radix_iter_lookup_ge(&pages, pindex);
 		*rahead = MIN(*rahead,
 		    (msucc != NULL ? msucc->pindex : object->size) - pindex);
-		mpred = m;
 		for (int i = 0; i < *rahead; i++) {
-			m = vm_page_alloc_after(object, &pages, pindex + i,
-			    VM_ALLOC_NORMAL, mpred);
+			m = vm_page_alloc_iter(object, pindex + i,
+			    VM_ALLOC_NORMAL, &pages);
 			if (m == NULL) {
 				*rahead = i;
 				break;
 			}
-			ma_dst[*rbehind + count + i] = mpred = m;
+			ma_dst[*rbehind + count + i] = m;
 		}
 	}
 }
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 15d38dd436ae..869ba1634000 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1469,18 +1469,14 @@ vm_page_dirty_KBI(vm_page_t m)
 }

 /*
- * Insert the given page into the given object at the given pindex. From
- * vm_page_insert, iter is false, mpred is initially NULL, and this procedure
- * looks it up. From vm_page_iter_insert, iter is true and mpred is known to
- * the caller to be valid, and may be NULL if this will be the page with the
- * lowest pindex.
+ * Insert the given page into the given object at the given pindex.
 *
 * The procedure is marked __always_inline to suggest to the compiler to
- * eliminate the lookup parameter and the associated alternate branch.
+ * eliminate the iter parameter and the associated alternate branch.
 */
 static __always_inline int
 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    struct pctrie_iter *pages, bool iter, vm_page_t mpred)
+    bool iter, struct pctrie_iter *pages)
 {
 	int error;

@@ -1496,13 +1492,12 @@ vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
 	m->ref_count |= VPRC_OBJREF;

 	/*
-	 * Add this page to the object's radix tree, and look up mpred if
-	 * needed.
+	 * Add this page to the object's radix tree.
 	 */
 	if (iter)
 		error = vm_radix_iter_insert(pages, m);
 	else
-		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
+		error = vm_radix_insert(&object->rtree, m);
 	if (__predict_false(error != 0)) {
 		m->object = NULL;
 		m->pindex = 0;
@@ -1510,9 +1505,6 @@ vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
 		return (1);
 	}

-	/*
-	 * Now link into the object's ordered list of backed pages.
-	 */
 	vm_page_insert_radixdone(m, object);
 	vm_pager_page_inserted(object, m);
 	return (0);
@@ -1528,7 +1520,7 @@ vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
-	return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL));
+	return (vm_page_insert_lookup(m, object, pindex, false, NULL));
 }

 /*
@@ -1538,16 +1530,13 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 *	"pindex" using the iterator "pages".  Returns 0 if the insertion was
 *	successful.
 *
- *	The page "mpred" must immediately precede the offset "pindex" within
- *	the specified object.
- *
 *	The object must be locked.
 */
 static int
-vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
-    vm_pindex_t pindex, vm_page_t mpred)
+vm_page_iter_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
+    struct pctrie_iter *pages)
 {
-	return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred));
+	return (vm_page_insert_lookup(m, object, pindex, true, pages));
 }

 /*
@@ -1937,7 +1926,6 @@ bool
 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
     vm_object_t new_object, vm_pindex_t new_pindex)
 {
-	vm_page_t mpred;
 	vm_pindex_t opidx;

 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
@@ -1952,14 +1940,13 @@ vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
 	 */
 	opidx = m->pindex;
 	m->pindex = new_pindex;
-	if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
+	if (vm_radix_insert(&new_object->rtree, m) != 0) {
 		m->pindex = opidx;
 		return (false);
 	}

 	/*
-	 * The operation cannot fail anymore. The removal must happen before
-	 * the listq iterator is tainted.
+	 * The operation cannot fail anymore.
 	 */
 	m->pindex = opidx;
 	vm_radix_iter_remove(old_pages);
@@ -1976,18 +1963,6 @@ vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
 	return (true);
 }

-/*
- * vm_page_mpred:
- *
- *	Return the greatest page of the object with index <= pindex,
- *	or NULL, if there is none.  Assumes object lock is held.
- */
-vm_page_t
-vm_page_mpred(vm_object_t object, vm_pindex_t pindex)
-{
-	return (vm_radix_lookup_le(&object->rtree, pindex));
-}
-
 /*
 * vm_page_alloc:
 *
@@ -2016,8 +1991,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	struct pctrie_iter pages;

 	vm_page_iter_init(&pages, object);
-	return (vm_page_alloc_after(object, &pages, pindex, req,
-	    vm_page_mpred(object, pindex)));
+	return (vm_page_alloc_iter(object, pindex, req, &pages));
 }

 /*
@@ -2027,8 +2001,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 * page index, or NULL if no such page exists.
 */
 vm_page_t
-vm_page_alloc_after(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, int req, vm_page_t mpred)
+vm_page_alloc_iter(vm_object_t object, vm_pindex_t pindex, int req,
+    struct pctrie_iter *pages)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
@@ -2036,8 +2010,8 @@ vm_page_alloc_after(vm_object_t object, struct pctrie_iter *pages,

 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
-		m = vm_page_alloc_domain_after(object, pages, pindex, domain,
-		    req, mpred);
+		m = vm_page_alloc_domain_iter(object, pindex, domain, req,
+		    pages);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
@@ -2099,8 +2073,8 @@ vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
 }

 vm_page_t
-vm_page_alloc_domain_after(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, int domain, int req, vm_page_t mpred)
+vm_page_alloc_domain_iter(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req, struct pctrie_iter *pages)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
@@ -2116,9 +2090,6 @@ vm_page_alloc_domain_after(vm_object_t object, struct pctrie_iter *pages,
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
-	KASSERT(mpred == NULL || mpred->pindex < pindex,
-	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
-	    (uintmax_t)pindex));
 	VM_OBJECT_ASSERT_WLOCKED(object);

 	flags = 0;
@@ -2136,7 +2107,7 @@ again:
 	 * Can we allocate the page from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
-	    (m = vm_reserv_alloc_page(object, pages, pindex, domain, req)) !=
+	    (m = vm_reserv_alloc_page(object, pindex, domain, req, pages)) !=
 	    NULL) {
 		goto found;
 	}
@@ -2206,7 +2177,7 @@ found:
 	}
 	m->a.act_count = 0;

-	if (vm_page_insert_lookup(m, object, pindex, pages, true, mpred)) {
+	if (vm_page_iter_insert(m, object, pindex, pages)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
@@ -2372,8 +2343,8 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 		 * Can we allocate the pages from a reservation?
 		 */
 		if (vm_object_reserv(object) &&
-		    (m_ret = vm_reserv_alloc_contig(object, &pages, pindex,
-		    domain, req, npages, low, high, alignment, boundary)) !=
+		    (m_ret = vm_reserv_alloc_contig(object, pindex, domain,
+		    req, npages, low, high, alignment, boundary, &pages)) !=
 		    NULL) {
 			break;
 		}
@@ -2414,7 +2385,7 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 		m->a.act_count = 0;
 		m->oflags = oflags;
 		m->pool = VM_FREEPOOL_DEFAULT;
-		if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
+		if (vm_page_iter_insert(m, object, pindex, &pages)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
@@ -4753,8 +4724,8 @@ vm_page_grab_pflags(int allocflags)
 * not it was grabbed.
 */
 static inline vm_page_t
-vm_page_grab_lookup(struct pctrie_iter *pages, vm_object_t object,
-    vm_pindex_t pindex, int allocflags, bool *found)
+vm_page_grab_lookup(vm_object_t object, vm_pindex_t pindex, int allocflags,
+    bool *found, struct pctrie_iter *pages)
 {
 	vm_page_t m;

@@ -4777,25 +4748,24 @@ vm_page_grab_lookup(struct pctrie_iter *pages, vm_object_t object,
 * however, be released and reacquired if the routine sleeps.
 */
 vm_page_t
-vm_page_grab_iter(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, int allocflags)
+vm_page_grab_iter(vm_object_t object, vm_pindex_t pindex, int allocflags,
+    struct pctrie_iter *pages)
 {
-	vm_page_t m, mpred;
+	vm_page_t m;
 	bool found;

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_grab_check(allocflags);

 	while ((m = vm_page_grab_lookup(
-	    pages, object, pindex, allocflags, &found)) == NULL) {
+	    object, pindex, allocflags, &found, pages)) == NULL) {
 		if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 			return (NULL);
 		if (found &&
 		    (allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
-		mpred = vm_radix_iter_lookup_lt(pages, pindex);
-		m = vm_page_alloc_after(object, pages, pindex,
-		    vm_page_grab_pflags(allocflags), mpred);
+		m = vm_page_alloc_iter(object, pindex,
+		    vm_page_grab_pflags(allocflags), pages);
 		if (m != NULL) {
 			if ((allocflags & VM_ALLOC_ZERO) != 0 &&
 			    (m->flags & PG_ZERO) == 0)
@@ -4825,7 +4795,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_iter_init(&pages, object);
-	return (vm_page_grab_iter(object, &pages, pindex, allocflags));
+	return (vm_page_grab_iter(object, pindex, allocflags, &pages));
 }

 /*
@@ -4909,8 +4879,8 @@ vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
 * of allocflags.
 */
 int
-vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object,
-    struct pctrie_iter *pages, vm_pindex_t pindex, int allocflags)
+vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
+    int allocflags, struct pctrie_iter *pages)
 {
 	vm_page_t m, mpred;
 	vm_page_t ma[VM_INITIAL_PAGEIN];
@@ -4957,7 +4927,7 @@ retrylookup:
 		return (VM_PAGER_FAIL);
 	} else {
 		mpred = vm_radix_iter_lookup_lt(pages, pindex);
-		m = vm_page_alloc_after(object, pages, pindex, pflags, mpred);
+		m = vm_page_alloc_iter(object, pindex, pflags, pages);
 		if (m == NULL) {
 			if (!vm_pager_can_alloc_page(object, pindex)) {
 				*mp = NULL;
@@ -4978,8 +4948,8 @@ retrylookup:
 	for (i = 1; i < after; i++) {
 		m = vm_radix_iter_lookup(pages, pindex + i);
 		if (m == NULL) {
-			m = vm_page_alloc_after(object, pages,
-			    pindex + i, VM_ALLOC_NORMAL, mpred);
+			m = vm_page_alloc_iter(object, pindex + i,
+			    VM_ALLOC_NORMAL, pages);
 			if (m == NULL)
 				break;
 		} else if (vm_page_any_valid(m) || !vm_page_tryxbusy(m))
@@ -5037,8 +5007,8 @@ vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_iter_init(&pages, object);
-	return (vm_page_grab_valid_iter(mp, object, &pages, pindex,
-	    allocflags));
+	return (vm_page_grab_valid_iter(mp, object, pindex, allocflags,
+	    &pages));
 }

 /*
@@ -5054,7 +5024,7 @@ vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
     int end)
 {
 	struct pctrie_iter pages;
-	vm_page_t m, mpred;
+	vm_page_t m;
 	int allocflags, rv;
 	bool found;

@@ -5066,12 +5036,11 @@ vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
 	allocflags = VM_ALLOC_NOCREAT | VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL;
 	vm_page_iter_init(&pages, object);
 	while ((m = vm_page_grab_lookup(
-	    &pages, object, pindex, allocflags, &found)) == NULL) {
+	    object, pindex, allocflags, &found, &pages)) == NULL) {
 		if (!vm_pager_has_page(object, pindex, NULL, NULL))
 			return (0);
-		mpred = vm_radix_iter_lookup_lt(&pages, pindex);
-		m = vm_page_alloc_after(object, &pages, pindex,
-		    vm_page_grab_pflags(allocflags), mpred);
+		m = vm_page_alloc_iter(object, pindex,
+		    vm_page_grab_pflags(allocflags), &pages);
 		if (m != NULL) {
 			vm_object_pip_add(object, 1);
 			VM_OBJECT_WUNLOCK(object);
@@ -5182,7 +5151,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count)
 {
 	struct pctrie_iter pages;
-	vm_page_t m, mpred;
+	vm_page_t m;
 	int pflags;
 	int i;

@@ -5197,7 +5166,6 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
 	i = 0;
 	vm_page_iter_init(&pages, object);
retrylookup:
-	mpred = vm_radix_iter_lookup_lt(&pages, pindex + i);
 	for (; i < count; i++) {
 		m = vm_radix_iter_lookup(&pages, pindex + i);
 		if (m != NULL) {
@@ -5212,8 +5180,8 @@ retrylookup:
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
-			m = vm_page_alloc_after(object, &pages, pindex + i,
-			    pflags | VM_ALLOC_COUNT(count - i), mpred);
+			m = vm_page_alloc_iter(object, pindex + i,
+			    pflags | VM_ALLOC_COUNT(count - i), &pages);
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
@@ -5228,7 +5196,7 @@ retrylookup:
 			vm_page_valid(m);
 		}
 		vm_page_grab_release(m, allocflags);
-		ma[i] = mpred = m;
+		ma[i] = m;
 	}
 	return (i);
 }
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 6d1982767cc8..979d9bd12b9f 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -79,7 +79,7 @@
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
- *		so they can be quickly deactivated at
+ *		so that they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
@@ -606,12 +606,11 @@ void vm_page_free_zero(vm_page_t m);

 void vm_page_activate (vm_page_t);
 void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
-vm_page_t vm_page_alloc_after(vm_object_t, struct pctrie_iter *, vm_pindex_t,
-    int, vm_page_t);
-vm_page_t vm_page_alloc_domain_after(vm_object_t, struct pctrie_iter *,
-    vm_pindex_t, int, int, vm_page_t);
+vm_page_t vm_page_alloc_iter(vm_object_t, vm_pindex_t, int,
+    struct pctrie_iter *);
+vm_page_t vm_page_alloc_domain_iter(vm_object_t, vm_pindex_t, int, int,
+    struct pctrie_iter *);
 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
@@ -632,8 +631,8 @@ bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
 int vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
     int end);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
-vm_page_t vm_page_grab_iter(vm_object_t object, struct pctrie_iter *pages, vm_pindex_t pindex,
-    int allocflags);
+vm_page_t vm_page_grab_iter(vm_object_t object, vm_pindex_t pindex,
+    int allocflags, struct pctrie_iter *pages);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
@@ -642,7 +641,7 @@ int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
 int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
     int allocflags);
 int vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object,
-    struct pctrie_iter *, vm_pindex_t pindex, int allocflags);
+    vm_pindex_t pindex, int allocflags, struct pctrie_iter *pages);
 int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
     vm_pindex_t pindex, int allocflags);
 void vm_page_deactivate(vm_page_t);
diff --git a/sys/vm/vm_radix.h b/sys/vm/vm_radix.h
index 231075d32754..1e9115f1a6ba 100644
--- a/sys/vm/vm_radix.h
+++ b/sys/vm/vm_radix.h
@@ -79,26 +79,6 @@ vm_radix_iter_insert(struct pctrie_iter *pages, vm_page_t page)
 	return (VM_RADIX_PCTRIE_ITER_INSERT(pages, page));
 }

-/*
- * Insert the page into the vm_radix tree with its pindex as the key.  Panic if
- * the pindex already exists.  Return zero on success or a non-zero error on
- * memory allocation failure.  Set the out parameter mpred to the previous page
- * in the tree as if found by a previous call to vm_radix_lookup_le with the
- * new page pindex.
- */
-static __inline int
-vm_radix_insert_lookup_lt(struct vm_radix *rtree, vm_page_t page,
-    vm_page_t *mpred)
-{
-	int error;
-
-	error = VM_RADIX_PCTRIE_INSERT_LOOKUP_LE(&rtree->rt_trie, page, mpred);
-	if (__predict_false(error == EEXIST))
-		panic("vm_radix_insert_lookup_lt: page already present, %p",
-		    *mpred);
-	return (error);
-}
-
 /*
 * Returns the value stored at the index assuming there is an external lock.
 *
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index a562fd80b4a1..bc4c47076975 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -510,8 +510,8 @@ vm_reserv_from_page(vm_page_t m)
 * successor pointer.
 */
 static vm_reserv_t
-vm_reserv_from_object(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, vm_page_t *mpredp, vm_page_t *msuccp)
+vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
+    vm_page_t *mpredp, vm_page_t *msuccp, struct pctrie_iter *pages)
 {
 	vm_reserv_t rv;
 	vm_page_t mpred, msucc;
@@ -685,9 +685,9 @@ vm_reserv_populate(vm_reserv_t rv, int index)
 * The object must be locked.
 */
 vm_page_t
-vm_reserv_alloc_contig(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
-    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
+vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
+    vm_paddr_t boundary, struct pctrie_iter *pages)
 {
 	struct vm_domain *vmd;
 	vm_paddr_t pa, size;
@@ -725,7 +725,7 @@ vm_reserv_alloc_contig(vm_object_t object, struct pctrie_iter *pages,
 	/*
 	 * Look for an existing reservation.
 	 */
-	rv = vm_reserv_from_object(object, pages, pindex, &mpred, &msucc);
+	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
 	if (rv != NULL) {
 		KASSERT(object != kernel_object || rv->domain == domain,
 		    ("vm_reserv_alloc_contig: domain mismatch"));
@@ -832,8 +832,8 @@ out:
 * The object must be locked.
 */
 vm_page_t
-vm_reserv_alloc_page(vm_object_t object, struct pctrie_iter *pages,
-    vm_pindex_t pindex, int domain, int req)
+vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req, struct pctrie_iter *pages)
 {
 	struct vm_domain *vmd;
 	vm_page_t m, mpred, msucc;
@@ -853,7 +853,7 @@ vm_reserv_alloc_page(vm_object_t object, struct pctrie_iter *pages,
 	/*
 	 * Look for an existing reservation.
 	 */
-	rv = vm_reserv_from_object(object, pages, pindex, &mpred, &msucc);
+	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
 	if (rv != NULL) {
 		KASSERT(object != kernel_object || rv->domain == domain,
 		    ("vm_reserv_alloc_page: domain mismatch"));
diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
index 1dcf09e6c736..24620e8a2f9a 100644
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -45,13 +45,12 @@
 /*
 * The following functions are only to be used by the virtual memory system.
 */
-vm_page_t	vm_reserv_alloc_contig(vm_object_t object,
-		    struct pctrie_iter *pages, vm_pindex_t pindex, int domain,
-		    int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
-		    u_long alignment, vm_paddr_t boundary);
-vm_page_t	vm_reserv_alloc_page(vm_object_t object,
-		    struct pctrie_iter *pages, vm_pindex_t pindex, int domain,
-		    int req);
+vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
+		    int domain, int req, u_long npages, vm_paddr_t low,
+		    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+		    struct pctrie_iter *pages);
+vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
+		    int domain, int req, struct pctrie_iter *pages);
void		vm_reserv_break_all(vm_object_t object);
boolean_t	vm_reserv_free_page(vm_page_t m);
void		vm_reserv_init(void);
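
[Editor's note: for readers tracking the interface change rather than the whole
diff, here is a minimal caller-side sketch of the new allocation pattern,
distilled from the call sites changed above (e.g. kmem_back_domain()). It is
illustrative only: example_fill() and its parameters are hypothetical and not
part of the commit.]

	/*
	 * Sketch: allocate pages at [start, start + n) in obj.
	 * Before this commit, a caller first looked up the predecessor
	 * page with vm_radix_iter_lookup_lt() and threaded that "mpred"
	 * value through every vm_page_alloc_after() call; now the pctrie
	 * iterator alone carries the position in the object's radix tree,
	 * and it is passed as the last argument.
	 */
	static void
	example_fill(vm_object_t obj, vm_pindex_t start, int n)
	{
		struct pctrie_iter pages;
		vm_page_t m;
		int i;

		vm_page_iter_init(&pages, obj);
		VM_OBJECT_WLOCK(obj);
		for (i = 0; i < n; i++) {
			m = vm_page_alloc_iter(obj, start + i,
			    VM_ALLOC_NORMAL, &pages);
			if (m == NULL)
				break;	/* No free pages; caller would unwind. */
		}
		VM_OBJECT_WUNLOCK(obj);
	}

The same reordering applies to every renamed function in the commit: the
struct pctrie_iter * argument moves to the end of the parameter list, into the
slot that "mpred" used to occupy.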