Date: Sat, 9 Sep 2017 17:35:19 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r323368 - head/sys/vm
Message-ID: <201709091735.v89HZJV4034082@repo.freebsd.org>
Author: kib
Date: Sat Sep  9 17:35:19 2017
New Revision: 323368
URL: https://svnweb.freebsd.org/changeset/base/323368

Log:
  Add a vm_page_change_lock() helper, the common code to not relock page
  lock if both old and new pages use the same underlying lock.  Convert
  existing places to use the helper instead of inlining it.  Use the
  optimization in vm_object_page_remove().

  Suggested and reviewed by:	alc, markj
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week

Modified:
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Sat Sep  9 16:04:49 2017	(r323367)
+++ head/sys/vm/vm_object.c	Sat Sep  9 17:35:19 2017	(r323368)
@@ -1917,6 +1917,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t
     int options)
 {
 	vm_page_t p, next;
+	struct mtx *mtx;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
@@ -1927,6 +1928,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
+	mtx = NULL;
 
 	/*
 	 * Here, the variable "p" is either (1) the page with the least pindex
@@ -1943,7 +1945,7 @@ again:
 		 * however, be invalidated if the option OBJPR_CLEANONLY is
 		 * not specified.
 		 */
-		vm_page_lock(p);
+		vm_page_change_lock(p, &mtx);
 		if (vm_page_xbusied(p)) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_page_busy_sleep(p, "vmopax", true);
@@ -1957,7 +1959,7 @@ again:
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
-			goto next;
+			continue;
 		}
 		if (vm_page_busied(p)) {
 			VM_OBJECT_WUNLOCK(object);
@@ -1971,14 +1973,14 @@ again:
 			if ((options & OBJPR_NOTMAPPED) == 0)
 				pmap_remove_write(p);
 			if (p->dirty)
-				goto next;
+				continue;
 		}
 		if ((options & OBJPR_NOTMAPPED) == 0)
 			pmap_remove_all(p);
 		vm_page_free(p);
-next:
-		vm_page_unlock(p);
 	}
+	if (mtx != NULL)
+		mtx_unlock(mtx);
 	vm_object_pip_wakeup(object);
 }
 
@@ -2001,7 +2003,7 @@ next:
 void
 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 	vm_page_t p, next;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
@@ -2018,17 +2020,7 @@ vm_object_page_noreuse(vm_object_t object, vm_pindex_t
 	mtx = NULL;
 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
-
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(p);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(p, &mtx);
 		vm_page_deactivate_noreuse(p);
 	}
 	if (mtx != NULL)

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Sat Sep  9 16:04:49 2017	(r323367)
+++ head/sys/vm/vm_page.c	Sat Sep  9 17:35:19 2017	(r323368)
@@ -938,6 +938,23 @@ vm_page_flash(vm_page_t m)
 }
 
 /*
+ * Avoid releasing and reacquiring the same page lock.
+ */
+void
+vm_page_change_lock(vm_page_t m, struct mtx **mtx)
+{
+	struct mtx *mtx1;
+
+	mtx1 = vm_page_lockptr(m);
+	if (*mtx == mtx1)
+		return;
+	if (*mtx != NULL)
+		mtx_unlock(*mtx);
+	*mtx = mtx1;
+	mtx_lock(mtx1);
+}
+
+/*
  * Keep page from being freed by the page daemon
  * much of the same effect as wiring, except much lower
  * overhead and should be used only for *very* temporary
@@ -970,20 +987,11 @@ vm_page_unhold(vm_page_t mem)
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 
 	mtx = NULL;
 	for (; count != 0; count--) {
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(*ma);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(*ma, &mtx);
 		vm_page_unhold(*ma);
 		ma++;
 	}
@@ -2023,7 +2031,7 @@ vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_run;
@@ -2066,16 +2074,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start,
 		} else
 			KASSERT(m_run != NULL, ("m_run == NULL"));
 
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 		m_inc = 1;
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
@@ -2225,7 +2224,7 @@ static int
 vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	struct spglist free;
 	vm_object_t object;
 	vm_paddr_t pa;
@@ -2246,13 +2245,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_p
 		/*
 		 * Avoid releasing and reacquiring the same page lock.
 		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
 			error = EBUSY;
@@ -2365,12 +2358,7 @@ retry:
 					 * The new page must be deactivated
 					 * before the object is unlocked.
 					 */
-					new_mtx = vm_page_lockptr(m_new);
-					if (m_mtx != new_mtx) {
-						mtx_unlock(m_mtx);
-						m_mtx = new_mtx;
-						mtx_lock(m_mtx);
-					}
+					vm_page_change_lock(m_new, &m_mtx);
 					vm_page_deactivate(m_new);
 				} else {
 					m->flags &= ~PG_ZERO;

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Sat Sep  9 16:04:49 2017	(r323367)
+++ head/sys/vm/vm_page.h	Sat Sep  9 17:35:19 2017	(r323368)
@@ -470,6 +470,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
+void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
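For context, every converted call site reduces to the same caller-side shape. Below is a minimal sketch modeled on the converted vm_object_page_noreuse() loop in the diff above; the process_page() helper is a hypothetical placeholder for the per-page work, not part of the patch:

	struct mtx *mtx;
	vm_page_t p, next;

	mtx = NULL;
	for (p = vm_page_find_least(object, start);
	    p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);
		/*
		 * Take p's page lock, skipping the unlock/lock pair when
		 * consecutive pages map to the same underlying mutex.
		 */
		vm_page_change_lock(p, &mtx);
		process_page(p);	/* hypothetical per-page work */
	}
	if (mtx != NULL)		/* drop the last page lock held */
		mtx_unlock(mtx);

Note the single mtx_unlock() after the loop: vm_page_change_lock() leaves the most recently acquired page lock held, so each converted function releases it once at the end rather than unlocking on every iteration.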