Date: Wed, 20 Oct 2021 01:23:16 GMT
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: b498f71bc56a - main - vm_page: Add a new page allocator interface for unnamed pages
Message-ID: <202110200123.19K1NGJh095466@gitrepo.freebsd.org>
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=b498f71bc56af0069d9a4685b8385ee613a00727

commit b498f71bc56af0069d9a4685b8385ee613a00727
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2021-10-20 00:22:12 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2021-10-20 01:22:55 +0000

    vm_page: Add a new page allocator interface for unnamed pages

    The diff adds vm_page_alloc_noobj() and vm_page_alloc_noobj_domain().
    These mostly correspond to vm_page_alloc() and vm_page_alloc_domain()
    when no VM object is specified, with the exception that they handle
    VM_ALLOC_ZERO by zeroing the page, rather than by preserving PG_ZERO.
    This simplifies callers and will permit simplification of the
    vm_page_alloc_domain() definition.

    Since the new allocator variant is similar to vm_page_alloc_freelist(),
    implement both of them using a common backend allocator function.  No
    functional change intended.

    Reviewed by:    alc, kib
    MFC after:      1 week
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D31985
---
 sys/vm/vm_page.c | 141 +++++++++++++++++++++++++++++++++++--------------------
 sys/vm/vm_page.h |  14 +++---
 2 files changed, 98 insertions(+), 57 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index bc1c6bbe4ce1..74b09fc27be5 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2395,54 +2395,41 @@ found:
 }
 
 /*
- *	vm_page_alloc_freelist:
- *
- *	Allocate a physical page from the specified free page list.
- *
- *	The caller must always specify an allocation class.
- *
- *	allocation classes:
- *	VM_ALLOC_NORMAL		normal process request
- *	VM_ALLOC_SYSTEM		system *really* needs a page
- *	VM_ALLOC_INTERRUPT	interrupt time request
- *
- *	optional allocation flags:
- *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
- *				intends to allocate
- *	VM_ALLOC_WIRED		wire the allocated page
- *	VM_ALLOC_ZERO		prefer a zeroed page
+ * Allocate a physical page that is not intended to be inserted into a VM
+ * object.  If the "freelist" parameter is not equal to VM_NFREELIST, then only
+ * pages from the specified vm_phys freelist will be returned.
  */
-vm_page_t
-vm_page_alloc_freelist(int freelist, int req)
-{
-	struct vm_domainset_iter di;
-	vm_page_t m;
-	int domain;
-
-	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
-	do {
-		m = vm_page_alloc_freelist_domain(domain, freelist, req);
-		if (m != NULL)
-			break;
-	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
-
-	return (m);
-}
-
-vm_page_t
-vm_page_alloc_freelist_domain(int domain, int freelist, int req)
+static __always_inline vm_page_t
+_vm_page_alloc_noobj_domain(int domain, const int freelist, int req)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
-	u_int flags;
+	int flags;
 
-	m = NULL;
+	KASSERT((req & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY |
+	    VM_ALLOC_NOOBJ)) == 0,
+	    ("%s: invalid req %#x", __func__, req));
+
+	flags = (req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0;
 	vmd = VM_DOMAIN(domain);
 again:
+	if (freelist == VM_NFREELIST &&
+	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
+		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
+		    M_NOWAIT | M_NOVM);
+		if (m != NULL) {
+			flags |= PG_PCPU_CACHE;
+			goto found;
+		}
+	}
+
 	if (vm_domain_allocate(vmd, req, 1)) {
 		vm_domain_free_lock(vmd);
-		m = vm_phys_alloc_freelist_pages(domain, freelist,
-		    VM_FREEPOOL_DIRECT, 0);
+		if (freelist == VM_NFREELIST)
+			m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
+		else
+			m = vm_phys_alloc_freelist_pages(domain, freelist,
+			    VM_FREEPOOL_DIRECT, 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL)
 			vm_domain_freecnt_inc(vmd, 1);
@@ -2452,44 +2439,96 @@ again:
 			goto again;
 		return (NULL);
 	}
+
+found:
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
-	/*
-	 * Initialize the page.  Only the PG_ZERO flag is inherited.
-	 */
+	/* Consumers should not rely on a useful default pindex value. */
+	m->pindex = 0xdeadc0dedeadc0de;
+	m->flags = (m->flags & PG_ZERO) | flags;
 	m->a.flags = 0;
-	flags = 0;
-	if ((req & VM_ALLOC_ZERO) != 0)
-		flags = PG_ZERO;
-	m->flags &= flags;
+	m->oflags = VPO_UNMANAGED;
+	m->busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
-	/* Unmanaged pages don't use "act_count". */
-	m->oflags = VPO_UNMANAGED;
+
+	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
+		pmap_zero_page(m);
+
+	return (m);
+}
+
+vm_page_t
+vm_page_alloc_freelist(int freelist, int req)
+{
+	struct vm_domainset_iter di;
+	vm_page_t m;
+	int domain;
+
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
+	do {
+		m = vm_page_alloc_freelist_domain(domain, freelist, req);
+		if (m != NULL)
+			break;
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
+
+	return (m);
+}
+
+vm_page_t
+vm_page_alloc_freelist_domain(int domain, int freelist, int req)
+{
+	KASSERT(freelist >= 0 && freelist < VM_NFREELIST,
+	    ("%s: invalid freelist %d", __func__, freelist));
+
+	return (_vm_page_alloc_noobj_domain(domain, freelist, req));
+}
+
+vm_page_t
+vm_page_alloc_noobj(int req)
+{
+	struct vm_domainset_iter di;
+	vm_page_t m;
+	int domain;
+
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
+	do {
+		m = vm_page_alloc_noobj_domain(domain, req);
+		if (m != NULL)
+			break;
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
+
 	return (m);
 }
 
+vm_page_t
+vm_page_alloc_noobj_domain(int domain, int req)
+{
+	return (_vm_page_alloc_noobj_domain(domain, VM_NFREELIST, req));
+}
+
 /*
  * Check a page that has been freshly dequeued from a freelist.
  */
 static void
 vm_page_alloc_check(vm_page_t m)
 {
+	KASSERT(m->object == NULL, ("page %p has object", m));
-	KASSERT(m->ref_count == 0, ("page %p has references", m));
-	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
 	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
-	KASSERT(m->valid == 0, ("free page %p is valid", m));
+	KASSERT(m->ref_count == 0, ("page %p has references", m));
+	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
+	KASSERT(m->valid == 0, ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }
 
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 6e0a4328e260..600619b00eaf 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -527,8 +527,8 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
  * Legend:
  * (a) - vm_page_alloc() supports the flag.
  * (c) - vm_page_alloc_contig() supports the flag.
- * (f) - vm_page_alloc_freelist() supports the flag.
  * (g) - vm_page_grab() supports the flag.
+ * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
  * (p) - vm_page_grab_pages() supports the flag.
  * Bits above 15 define the count of additional pages that the caller
  * intends to allocate.
@@ -537,10 +537,10 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_INTERRUPT	1
 #define	VM_ALLOC_SYSTEM		2
 #define	VM_ALLOC_CLASS_MASK	3
-#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
-#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
-#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
-#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
+#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
+#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
+#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
+#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
 #define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
 #define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
@@ -548,7 +548,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
-#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
+#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
 #define	VM_ALLOC_COUNT_SHIFT	16
 #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
 
@@ -614,6 +614,8 @@ vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
     vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_alloc_freelist_domain(int, int, int);
+vm_page_t vm_page_alloc_noobj(int);
+vm_page_t vm_page_alloc_noobj_domain(int, int);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
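For illustration only (not part of the commit): a minimal sketch of how a caller
might use the new interface introduced above.  The consumer function name and
the particular flag combination are assumptions; only vm_page_alloc_noobj() and
the VM_ALLOC_* flags come from the patch.

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

/*
 * Hypothetical consumer: allocate a wired, zero-filled page with no backing
 * VM object.  With the new interface, VM_ALLOC_ZERO guarantees a zeroed page
 * (the backend calls pmap_zero_page() when PG_ZERO is not already set), so
 * the caller no longer needs to test PG_ZERO itself.
 */
static vm_page_t
alloc_scratch_page(void)
{
	vm_page_t m;

	/* VM_ALLOC_WAITOK: sleep and retry until the allocation succeeds. */
	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_WAITOK);
	return (m);
}

Before this change, such a caller would typically pass VM_ALLOC_NOOBJ to
vm_page_alloc() with a NULL object and zero the page itself whenever PG_ZERO
was not set, which is the caller-side boilerplate the commit message says the
new functions eliminate.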