Date: Wed, 21 Aug 2019 16:11:13 +0000 (UTC)
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r351333 - head/sys/vm
Message-ID: <201908211611.x7LGBD8Z081905@repo.freebsd.org>
Author: markj
Date: Wed Aug 21 16:11:12 2019
New Revision: 351333
URL: https://svnweb.freebsd.org/changeset/base/351333

Log:
  Simplify vm_page_dequeue() and fix an assertion.

  - Add a vm_pagequeue_remove() function to physically remove a page
    from its queue and update the queue length.
  - Remove vm_page_pagequeue_lockptr() and let vm_page_pagequeue()
    return NULL for dequeued pages.
  - Avoid unnecessarily reloading the queue index if vm_page_dequeue()
    loses a race with a concurrent queue operation.
  - Correct an always-true assertion: vm_page_dequeue() may be called
    from the page allocator with the page unlocked.  The assertion
    m->order == VM_NFREEORDER simply tests whether the page has been
    removed from the vm_phys free lists; instead, check whether the
    page belongs to an object.

  Reviewed by:	kib
  MFC after:	1 week
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D21341

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pagequeue.h

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Wed Aug 21 16:01:17 2019	(r351332)
+++ head/sys/vm/vm_page.c	Wed Aug 21 16:11:12 2019	(r351333)
@@ -3056,21 +3056,15 @@ vm_waitpfault(struct domainset *dset, int timo)
 	mtx_unlock(&vm_domainset_lock);
 }
 
-struct vm_pagequeue *
+static struct vm_pagequeue *
 vm_page_pagequeue(vm_page_t m)
 {
 
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
-}
-
-static struct mtx *
-vm_page_pagequeue_lockptr(vm_page_t m)
-{
 	uint8_t queue;
 
 	if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
 		return (NULL);
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex);
+	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
 
 static inline void
@@ -3093,10 +3087,8 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_pa
 	    m, pq, qflags));
 
 	if ((qflags & PGA_DEQUEUE) != 0) {
-		if (__predict_true((qflags & PGA_ENQUEUED) != 0)) {
-			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-			vm_pagequeue_cnt_dec(pq);
-		}
+		if (__predict_true((qflags & PGA_ENQUEUED) != 0))
+			vm_pagequeue_remove(pq, m);
 		vm_page_dequeue_complete(m);
 	} else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
 		if ((qflags & PGA_ENQUEUED) != 0)
@@ -3299,16 +3291,14 @@ vm_page_dequeue_deferred_free(vm_page_t m)
 void
 vm_page_dequeue(vm_page_t m)
 {
-	struct mtx *lock, *lock1;
-	struct vm_pagequeue *pq;
+	struct vm_pagequeue *pq, *pq1;
 	uint8_t aflags;
 
-	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER,
+	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
 	    ("page %p is allocated and unlocked", m));
 
-	for (;;) {
-		lock = vm_page_pagequeue_lockptr(m);
-		if (lock == NULL) {
+	for (pq = vm_page_pagequeue(m);; pq = pq1) {
+		if (pq == NULL) {
 			/*
 			 * A thread may be concurrently executing
 			 * vm_page_dequeue_complete().  Ensure that all queue
@@ -3327,27 +3317,24 @@ vm_page_dequeue(vm_page_t m)
 			 * critical section.
 			 */
 			cpu_spinwait();
+			pq1 = vm_page_pagequeue(m);
 			continue;
 		}
-		mtx_lock(lock);
-		if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock)
+		vm_pagequeue_lock(pq);
+		if ((pq1 = vm_page_pagequeue(m)) == pq)
 			break;
-		mtx_unlock(lock);
-		lock = lock1;
+		vm_pagequeue_unlock(pq);
 	}
-	KASSERT(lock == vm_page_pagequeue_lockptr(m),
+	KASSERT(pq == vm_page_pagequeue(m),
 	    ("%s: page %p migrated directly between queues", __func__, m));
 	KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
 	    mtx_owned(vm_page_lockptr(m)),
 	    ("%s: queued unlocked page %p", __func__, m));
 
-	if ((m->aflags & PGA_ENQUEUED) != 0) {
-		pq = vm_page_pagequeue(m);
-		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-		vm_pagequeue_cnt_dec(pq);
-	}
+	if ((m->aflags & PGA_ENQUEUED) != 0)
+		vm_pagequeue_remove(pq, m);
 	vm_page_dequeue_complete(m);
-	mtx_unlock(lock);
+	vm_pagequeue_unlock(pq);
 }
 
 /*

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Wed Aug 21 16:01:17 2019	(r351332)
+++ head/sys/vm/vm_page.h	Wed Aug 21 16:11:12 2019	(r351333)
@@ -552,7 +552,6 @@ void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
-struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);

Modified: head/sys/vm/vm_pagequeue.h
==============================================================================
--- head/sys/vm/vm_pagequeue.h	Wed Aug 21 16:01:17 2019	(r351332)
+++ head/sys/vm/vm_pagequeue.h	Wed Aug 21 16:11:12 2019	(r351333)
@@ -199,6 +199,14 @@ vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int adde
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
 static inline void
+vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
+{
+
+	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+	vm_pagequeue_cnt_dec(pq);
+}
+
+static inline void
 vm_batchqueue_init(struct vm_batchqueue *bq)
 {
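The core of the change is the retry loop in vm_page_dequeue(): the page's queue index can change underfoot, so the code loads the queue pointer, locks that queue, and then re-checks that the page still maps to the same queue, retrying on a mismatch. Below is a minimal userland sketch of that lookup/lock/recheck pattern. Every name in it (struct queue, struct item, queue_of(), lock_item_queue(), QNONE) is an invented stand-in for illustration, not kernel API, and it simplifies the real function by returning NULL where the kernel instead busy-waits for a concurrent dequeue to complete.

	/*
	 * Sketch of the lookup/lock/recheck pattern used by
	 * vm_page_dequeue() after this change.  Compile with -lpthread.
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stddef.h>

	#define	NQUEUES	4
	#define	QNONE	255		/* stands in for PQ_NONE */

	struct queue {
		pthread_mutex_t	lock;	/* stands in for pq_mutex */
		int		len;	/* stands in for pq_cnt */
	};

	struct item {
		_Atomic unsigned char queue;	/* index, or QNONE */
	};

	static struct queue queues[NQUEUES];

	/*
	 * Like the new vm_page_pagequeue(): translate the item's queue
	 * index into a queue pointer, or NULL if it is not enqueued.
	 */
	static struct queue *
	queue_of(struct item *it)
	{
		unsigned char q;

		if ((q = atomic_load(&it->queue)) == QNONE)
			return (NULL);
		return (&queues[q]);
	}

	/*
	 * Lock the queue the item belongs to.  A concurrent thread may
	 * move the item between queues, so re-check the index after
	 * acquiring the lock and retry on a mismatch, exactly as
	 * vm_page_dequeue() does with pq/pq1.
	 */
	static struct queue *
	lock_item_queue(struct item *it)
	{
		struct queue *q, *q1;

		for (q = queue_of(it);; q = q1) {
			if (q == NULL)
				return (NULL);	/* dequeued while we looked */
			pthread_mutex_lock(&q->lock);
			if ((q1 = queue_of(it)) == q)
				return (q);	/* index stable under lock */
			pthread_mutex_unlock(&q->lock);
		}
	}

	int
	main(void)
	{
		struct item it;
		struct queue *q;
		int i;

		for (i = 0; i < NQUEUES; i++)
			pthread_mutex_init(&queues[i].lock, NULL);

		/* "Enqueue" the item on queue 2. */
		atomic_store(&it.queue, 2);
		queues[2].len = 1;

		if ((q = lock_item_queue(&it)) != NULL) {
			/* Mirrors vm_pagequeue_remove() + dequeue. */
			q->len--;
			atomic_store(&it.queue, QNONE);
			pthread_mutex_unlock(&q->lock);
		}
		return (0);
	}

Note the design payoff: once vm_page_pagequeue() returns the pagequeue itself (NULL when dequeued) rather than a bare lock pointer, the caller that wins the retry loop already holds the handle it needs for vm_pagequeue_remove(), so the separate vm_page_pagequeue_lockptr() helper and the extra queue lookup it implied can both go away.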