Date: Fri, 21 Feb 2025 21:15:41 GMT
From: Doug Moore <dougm@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: 2eef41e55385 - main - Revert "vm_page: define partial page invalidate"
Message-ID: <202502212115.51LLFfc3041329@gitrepo.freebsd.org>
The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=2eef41e55385333f9535240f3d31a5a71f718d06

commit 2eef41e55385333f9535240f3d31a5a71f718d06
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-02-21 21:14:54 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-02-21 21:14:54 +0000

    Revert "vm_page: define partial page invalidate"

    A negative review arrived as this was being committed, so undo and
    reevaluate.

    This reverts commit 5611a38d818587b307e1fb110f72d2996c170035.
---
 sys/fs/tmpfs/tmpfs_subr.c | 47 ++++++++++++++++++++++++++++++++++++++++----
 sys/kern/uipc_shm.c       | 47 ++++++++++++++++++++++++++++++++++++++++----
 sys/vm/vm_page.c          | 50 -----------------------------------------------
 sys/vm/vm_page.h          |  2 --
 4 files changed, 86 insertions(+), 60 deletions(-)

diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 0cac19ed3780..41d1f27caf13 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -493,11 +493,50 @@ static int
 tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end, boolean_t ignerr)
 {
-	int error;
+	vm_page_t m;
+	int rv, error;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(base >= 0, ("%s: base %d", __func__, base));
+	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+	    end));
+	error = 0;
+
+retry:
+	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
+	if (m != NULL) {
+		MPASS(vm_page_all_valid(m));
+	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
+		m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
+		    VM_ALLOC_WAITFAIL);
+		if (m == NULL)
+			goto retry;
+		vm_object_pip_add(object, 1);
+		VM_OBJECT_WUNLOCK(object);
+		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
+		vm_object_pip_wakeup(object);
+		if (rv == VM_PAGER_OK) {
+			/*
+			 * Since the page was not resident, and therefore not
+			 * recently accessed, immediately enqueue it for
+			 * asynchronous laundering.  The current operation is
+			 * not regarded as an access.
+			 */
+			vm_page_launder(m);
+		} else {
+			vm_page_free(m);
+			m = NULL;
+			if (!ignerr)
+				error = EIO;
+		}
+	}
+	if (m != NULL) {
+		pmap_zero_page_area(m, base, end - base);
+		vm_page_set_dirty(m);
+		vm_page_xunbusy(m);
+	}
 
-	error = vm_page_partial_page_invalidate(object, idx, base, end);
-	if (ignerr)
-		error = 0;
 	return (error);
 }
 
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 083e95432208..026611a59593 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -697,12 +697,51 @@ static int
 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end)
 {
-	int error;
+	vm_page_t m;
+	int rv;
 
-	error = vm_page_partial_page_invalidate(object, idx, base, end);
-	if (error == EIO)
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(base >= 0, ("%s: base %d", __func__, base));
+	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+	    end));
+
+retry:
+	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
+	if (m != NULL) {
+		MPASS(vm_page_all_valid(m));
+	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
+		m = vm_page_alloc(object, idx,
+		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+		if (m == NULL)
+			goto retry;
+		vm_object_pip_add(object, 1);
 		VM_OBJECT_WUNLOCK(object);
-	return (error);
+		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
+		vm_object_pip_wakeup(object);
+		if (rv == VM_PAGER_OK) {
+			/*
+			 * Since the page was not resident, and therefore not
+			 * recently accessed, immediately enqueue it for
+			 * asynchronous laundering.  The current operation is
+			 * not regarded as an access.
+			 */
+			vm_page_launder(m);
+		} else {
+			vm_page_free(m);
+			VM_OBJECT_WUNLOCK(object);
+			return (EIO);
+		}
+	}
+	if (m != NULL) {
+		pmap_zero_page_area(m, base, end - base);
+		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
+		    __func__, m));
+		vm_page_set_dirty(m);
+		vm_page_xunbusy(m);
+	}
+
+	return (0);
 }
 
 static int
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index f0f3c1e85564..c105aafca40f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -5086,56 +5086,6 @@ out:
 	return (VM_PAGER_OK);
 }
 
-/*
- * Fill a partial page with zeroes.
- */
-int
-vm_page_partial_page_invalidate(vm_object_t object, vm_pindex_t pindex,
-    int base, int end)
-{
-	vm_page_t m;
-	int rv;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT(base >= 0, ("%s: base %d", __func__, base));
-	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
-	    end));
-
-retry:
-	m = vm_page_grab(object, pindex, VM_ALLOC_NOCREAT);
-	if (m != NULL) {
-		MPASS(vm_page_all_valid(m));
-	} else if (vm_pager_has_page(object, pindex, NULL, NULL)) {
-		m = vm_page_alloc(object, pindex,
-		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
-		if (m == NULL)
-			goto retry;
-		vm_object_pip_add(object, 1);
-		VM_OBJECT_WUNLOCK(object);
-		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-		VM_OBJECT_WLOCK(object);
-		vm_object_pip_wakeup(object);
-		if (rv != VM_PAGER_OK) {
-			vm_page_free(m);
-			return (EIO);
-		}
-
-		/*
-		 * Since the page was not resident, and therefore not recently
-		 * accessed, immediately enqueue it for asynchronous laundering.
-		 * The current operation is not regarded as an access.
-		 */
-		vm_page_launder(m);
-	} else
-		return (0);
-
-	pmap_zero_page_area(m, base, end - base);
-	KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m));
-	vm_page_set_dirty(m);
-	vm_page_xunbusy(m);
-	return (0);
-}
-
 /*
  * Locklessly grab a valid page.  If the page is not valid or not yet
  * allocated this will fall back to the object lock method.
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index b568fddab8d4..744688bf789b 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -627,8 +627,6 @@ vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
     vm_memattr_t memattr);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
-int vm_page_partial_page_invalidate(vm_object_t object, vm_pindex_t pindex,
-    int base, int end);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
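
For readers skimming the revert, the following is a condensed, commented sketch of the partial-page-invalidate logic that tmpfs and shm each carry again after this change. It paraphrases the tmpfs hunk above (the shm variant differs only in how it reports the EIO case); the function name partial_page_invalidate_sketch and the include list are illustrative additions, not code from the tree.

/* Illustrative sketch only; name and includes are not from the commit. */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static int
partial_page_invalidate_sketch(vm_object_t object, vm_pindex_t idx, int base,
    int end, boolean_t ignerr)
{
	vm_page_t m;
	int error, rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
retry:
	/* Use the page only if it is already resident and fully valid. */
	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
	if (m == NULL && vm_pager_has_page(object, idx, NULL, NULL)) {
		/* Not resident but backed by the pager: bring it in. */
		m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
		    VM_ALLOC_WAITFAIL);
		if (m == NULL)
			goto retry;
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(object);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
		if (rv == VM_PAGER_OK) {
			/* The page-in is not an access; launder the page. */
			vm_page_launder(m);
		} else {
			vm_page_free(m);
			m = NULL;
			if (!ignerr)
				error = EIO;
		}
	}
	if (m != NULL) {
		/* Zero the sub-page range, dirty the page, drop the busy. */
		pmap_zero_page_area(m, base, end - base);
		vm_page_set_dirty(m);
		vm_page_xunbusy(m);
	}
	return (error);
}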