From owner-svn-src-stable-11@freebsd.org  Sat Sep 16 13:49:28 2017
Delivered-To: svn-src-stable-11@mailman.ysv.freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:1900:2254:206a::19:1])
	by mailman.ysv.freebsd.org (Postfix) with ESMTP id 20E61E17A82;
	Sat, 16 Sep 2017 13:49:28 +0000 (UTC)
	(envelope-from kib@FreeBSD.org)
Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(Client did not present a certificate)
	by mx1.freebsd.org (Postfix) with ESMTPS id EECBA76A06;
	Sat, 16 Sep 2017 13:49:27 +0000 (UTC)
	(envelope-from kib@FreeBSD.org)
Received: from repo.freebsd.org ([127.0.1.37])
	by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id v8GDnRCR081625;
	Sat, 16 Sep 2017 13:49:27 GMT (envelope-from kib@FreeBSD.org)
Received: (from kib@localhost)
	by repo.freebsd.org (8.15.2/8.15.2/Submit) id v8GDnQo1081622;
	Sat, 16 Sep 2017 13:49:26 GMT (envelope-from kib@FreeBSD.org)
Message-Id: <201709161349.v8GDnQo1081622@repo.freebsd.org>
X-Authentication-Warning: repo.freebsd.org: kib set sender to kib@FreeBSD.org using -f
From: Konstantin Belousov
Date: Sat, 16 Sep 2017 13:49:26 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r323638 - stable/11/sys/vm
X-SVN-Group: stable-11
X-SVN-Commit-Author: kib
X-SVN-Commit-Paths: stable/11/sys/vm
X-SVN-Commit-Revision: 323638
X-SVN-Commit-Repository: base
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-stable-11@freebsd.org
X-Mailman-Version: 2.1.23
Precedence: list
List-Id: SVN commit messages for only the 11-stable src tree
X-List-Received-Date: Sat, 16 Sep 2017 13:49:28 -0000

Author: kib
Date: Sat Sep 16 13:49:26 2017
New Revision: 323638
URL: https://svnweb.freebsd.org/changeset/base/323638

Log:
  MFC r323368:
  Add a vm_page_change_lock() helper.

Modified:
  stable/11/sys/vm/vm_object.c
  stable/11/sys/vm/vm_page.c
  stable/11/sys/vm/vm_page.h

Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/vm/vm_object.c
==============================================================================
--- stable/11/sys/vm/vm_object.c	Sat Sep 16 05:42:27 2017	(r323637)
+++ stable/11/sys/vm/vm_object.c	Sat Sep 16 13:49:26 2017	(r323638)
@@ -1910,6 +1910,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t
     int options)
 {
 	vm_page_t p, next;
+	struct mtx *mtx;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
@@ -1920,6 +1921,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
+	mtx = NULL;
 
 	/*
 	 * Here, the variable "p" is either (1) the page with the least pindex
@@ -1936,7 +1938,7 @@ again:
 		 * however, be invalidated if the option OBJPR_CLEANONLY is
 		 * not specified.
 		 */
-		vm_page_lock(p);
+		vm_page_change_lock(p, &mtx);
 		if (vm_page_xbusied(p)) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_page_busy_sleep(p, "vmopax", true);
@@ -1950,7 +1952,7 @@
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
-			goto next;
+			continue;
 		}
 		if (vm_page_busied(p)) {
 			VM_OBJECT_WUNLOCK(object);
@@ -1964,14 +1966,14 @@
 			if ((options & OBJPR_NOTMAPPED) == 0)
 				pmap_remove_write(p);
 			if (p->dirty)
-				goto next;
+				continue;
 		}
 		if ((options & OBJPR_NOTMAPPED) == 0)
 			pmap_remove_all(p);
 		vm_page_free(p);
-next:
-		vm_page_unlock(p);
 	}
+	if (mtx != NULL)
+		mtx_unlock(mtx);
 	vm_object_pip_wakeup(object);
 }
 
@@ -1994,7 +1996,7 @@ next:
 void
 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 	vm_page_t p, next;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
@@ -2011,17 +2013,7 @@ vm_object_page_noreuse(vm_object_t object, vm_pindex_t
 	mtx = NULL;
 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
-
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(p);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(p, &mtx);
 		vm_page_deactivate_noreuse(p);
 	}
 	if (mtx != NULL)

Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c	Sat Sep 16 05:42:27 2017	(r323637)
+++ stable/11/sys/vm/vm_page.c	Sat Sep 16 13:49:26 2017	(r323638)
@@ -905,6 +905,23 @@ vm_page_flash(vm_page_t m)
 }
 
 /*
+ * Avoid releasing and reacquiring the same page lock.
+ */
+void
+vm_page_change_lock(vm_page_t m, struct mtx **mtx)
+{
+	struct mtx *mtx1;
+
+	mtx1 = vm_page_lockptr(m);
+	if (*mtx == mtx1)
+		return;
+	if (*mtx != NULL)
+		mtx_unlock(*mtx);
+	*mtx = mtx1;
+	mtx_lock(mtx1);
+}
+
+/*
  * Keep page from being freed by the page daemon
  * much of the same effect as wiring, except much lower
  * overhead and should be used only for *very* temporary
@@ -937,20 +954,11 @@ vm_page_unhold(vm_page_t mem)
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 
 	mtx = NULL;
 	for (; count != 0; count--) {
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(*ma);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(*ma, &mtx);
 		vm_page_unhold(*ma);
 		ma++;
 	}
@@ -1989,7 +1997,7 @@ vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_run;
@@ -2032,16 +2040,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start,
 		} else
 			KASSERT(m_run != NULL, ("m_run == NULL"));
 
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 		m_inc = 1;
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
@@ -2191,7 +2190,7 @@ static int
 vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	struct spglist free;
 	vm_object_t object;
 	vm_paddr_t pa;
@@ -2212,13 +2211,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_p
 		/*
 		 * Avoid releasing and reacquiring the same page lock.
 		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
 			error = EBUSY;
@@ -2331,12 +2324,7 @@ retry:
 				 * The new page must be deactivated
 				 * before the object is unlocked.
 				 */
-				new_mtx = vm_page_lockptr(m_new);
-				if (m_mtx != new_mtx) {
-					mtx_unlock(m_mtx);
-					m_mtx = new_mtx;
-					mtx_lock(m_mtx);
-				}
+				vm_page_change_lock(m_new, &m_mtx);
 				vm_page_deactivate(m_new);
 			} else {
 				m->flags &= ~PG_ZERO;

Modified: stable/11/sys/vm/vm_page.h
==============================================================================
--- stable/11/sys/vm/vm_page.h	Sat Sep 16 05:42:27 2017	(r323637)
+++ stable/11/sys/vm/vm_page.h	Sat Sep 16 13:49:26 2017	(r323638)
@@ -448,6 +448,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
+void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
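
For context when reading the diff: vm_page_change_lock() centralizes the
pattern that the removed hunks open-coded in vm_object_page_noreuse(),
vm_page_unhold_pages(), vm_page_scan_contig() and vm_page_reclaim_run():
while iterating over pages, keep at most one page lock held and drop and
reacquire a lock only when the next page maps to a different lock pointer.
A minimal caller sketch follows; it is modeled on the vm_page_unhold_pages()
loop shown above, and the frob_pages() wrapper itself is hypothetical, not
part of the tree (usual kernel/VM include order assumed).

/*
 * Sketch only (not from the commit): walk an array of held pages and
 * release each hold while holding that page's lock.  vm_page_change_lock()
 * acquires the lock for the current page and unlocks the previously held
 * lock only if the two pages hash to different lock pointers.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static void
frob_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx;

	mtx = NULL;				/* no page lock held yet */
	for (; count != 0; count--, ma++) {
		/* Take *ma's lock; keep or swap the one already held. */
		vm_page_change_lock(*ma, &mtx);
		vm_page_unhold(*ma);		/* per-page work under the lock */
	}
	if (mtx != NULL)			/* drop whatever lock is still held */
		mtx_unlock(mtx);
}

The calling convention is the one visible in the new helper: *mtx must start
out NULL or point to a page lock the caller already holds, and the caller is
responsible for the final mtx_unlock() once the loop ends.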