From owner-svn-src-head@freebsd.org Sat Feb 1 18:23:51 2020 Return-Path: Delivered-To: svn-src-head@mailman.nyi.freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1]) by mailman.nyi.freebsd.org (Postfix) with ESMTP id DB28D1FF087; Sat, 1 Feb 2020 18:23:51 +0000 (UTC) (envelope-from markj@FreeBSD.org) Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org [IPv6:2610:1c1:1:606c::19:3]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) server-signature RSA-PSS (4096 bits) client-signature RSA-PSS (4096 bits) client-digest SHA256) (Client CN "mxrelay.nyi.freebsd.org", Issuer "Let's Encrypt Authority X3" (verified OK)) by mx1.freebsd.org (Postfix) with ESMTPS id 4892Xl54XHz3NGb; Sat, 1 Feb 2020 18:23:51 +0000 (UTC) (envelope-from markj@FreeBSD.org) Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Client did not present a certificate) by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id A974D2FCC; Sat, 1 Feb 2020 18:23:51 +0000 (UTC) (envelope-from markj@FreeBSD.org) Received: from repo.freebsd.org ([127.0.1.37]) by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id 011INpcj074270; Sat, 1 Feb 2020 18:23:51 GMT (envelope-from markj@FreeBSD.org) Received: (from markj@localhost) by repo.freebsd.org (8.15.2/8.15.2/Submit) id 011INpJo074268; Sat, 1 Feb 2020 18:23:51 GMT (envelope-from markj@FreeBSD.org) Message-Id: <202002011823.011INpJo074268@repo.freebsd.org> X-Authentication-Warning: repo.freebsd.org: markj set sender to markj@FreeBSD.org using -f From: Mark Johnston Date: Sat, 1 Feb 2020 18:23:51 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r357374 - head/sys/vm X-SVN-Group: head X-SVN-Commit-Author: markj X-SVN-Commit-Paths: head/sys/vm X-SVN-Commit-Revision: 357374 X-SVN-Commit-Repository: base MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: svn-src-head@freebsd.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: SVN commit messages for the src tree for head/-current List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Sat, 01 Feb 2020 18:23:51 -0000 Author: markj Date: Sat Feb 1 18:23:51 2020 New Revision: 357374 URL: https://svnweb.freebsd.org/changeset/base/357374 Log: Remove a couple of lingering usages of the page lock. Update vm_page_scan_contig() and vm_page_reclaim_run() to stop using vm_page_change_lock(). It has no use after r356157. Remove vm_page_change_lock() now that it has no users. Remove an unnecessary check for wirings in vm_page_scan_contig(), which was previously checking twice. The check is racy until vm_page_reclaim_run() ensures that the page is unmapped, so one check is sufficient. Reviewed by: jeff, kib (previous versions) Tested by: pho (previous version) Differential Revision: https://reviews.freebsd.org/D23279 Modified: head/sys/vm/vm_page.c head/sys/vm/vm_page.h Modified: head/sys/vm/vm_page.c ============================================================================== --- head/sys/vm/vm_page.c Sat Feb 1 17:54:46 2020 (r357373) +++ head/sys/vm/vm_page.c Sat Feb 1 18:23:51 2020 (r357374) @@ -1156,23 +1156,6 @@ vm_page_xunbusy_hard_unchecked(vm_page_t m) } /* - * Avoid releasing and reacquiring the same page lock. - */ -void -vm_page_change_lock(vm_page_t m, struct mtx **mtx) -{ - struct mtx *mtx1; - - mtx1 = vm_page_lockptr(m); - if (*mtx == mtx1) - return; - if (*mtx != NULL) - mtx_unlock(*mtx); - *mtx = mtx1; - mtx_lock(mtx1); -} - -/* - * vm_page_unhold_pages: - * - * Unhold each of the pages that is referenced by the given array. 
@@ -2444,7 +2427,6 @@ vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options) { - struct mtx *m_mtx; vm_object_t object; vm_paddr_t pa; vm_page_t m, m_run; @@ -2458,7 +2440,6 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, KASSERT(powerof2(boundary), ("boundary is not a power of 2")); m_run = NULL; run_len = 0; - m_mtx = NULL; for (m = m_start; m < m_end && run_len < npages; m += m_inc) { KASSERT((m->flags & PG_MARKER) == 0, ("page %p is PG_MARKER", m)); @@ -2489,9 +2470,8 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, } else KASSERT(m_run != NULL, ("m_run == NULL")); - vm_page_change_lock(m, &m_mtx); - m_inc = 1; retry: + m_inc = 1; if (vm_page_wired(m)) run_ext = 0; #if VM_NRESERVLEVEL > 0 @@ -2504,23 +2484,17 @@ retry: pa); } #endif - else if ((object = m->object) != NULL) { + else if ((object = + (vm_object_t)atomic_load_ptr(&m->object)) != NULL) { /* * The page is considered eligible for relocation if * and only if it could be laundered or reclaimed by * the page daemon. */ - if (!VM_OBJECT_TRYRLOCK(object)) { - mtx_unlock(m_mtx); - VM_OBJECT_RLOCK(object); - mtx_lock(m_mtx); - if (m->object != object) { - /* - * The page may have been freed. - */ - VM_OBJECT_RUNLOCK(object); - goto retry; - } + VM_OBJECT_RLOCK(object); + if (object != m->object) { + VM_OBJECT_RUNLOCK(object); + goto retry; } /* Don't care: PG_NODUMP, PG_ZERO. */ if (object->type != OBJT_DEFAULT && @@ -2537,8 +2511,7 @@ retry: vm_reserv_size(level)) - pa); #endif } else if (object->memattr == VM_MEMATTR_DEFAULT && - vm_page_queue(m) != PQ_NONE && !vm_page_busied(m) && - !vm_page_wired(m)) { + vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { /* * The page is allocated but eligible for * relocation. 
Extend the current run by one @@ -2605,8 +2578,6 @@ retry: } } } - if (m_mtx != NULL) - mtx_unlock(m_mtx); if (run_len >= npages) return (m_run); return (NULL); @@ -2634,7 +2605,6 @@ vm_page_reclaim_run(int req_class, int domain, u_long vm_paddr_t high) { struct vm_domain *vmd; - struct mtx *m_mtx; struct spglist free; vm_object_t object; vm_paddr_t pa; @@ -2647,42 +2617,28 @@ vm_page_reclaim_run(int req_class, int domain, u_long error = 0; m = m_run; m_end = m_run + npages; - m_mtx = NULL; for (; error == 0 && m < m_end; m++) { KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, ("page %p is PG_FICTITIOUS or PG_MARKER", m)); /* - * Avoid releasing and reacquiring the same page lock. + * Racily check for wirings. Races are handled once the object + * lock is held and the page is unmapped. */ - vm_page_change_lock(m, &m_mtx); -retry: - /* - * Racily check for wirings. Races are handled below. - */ if (vm_page_wired(m)) error = EBUSY; - else if ((object = m->object) != NULL) { + else if ((object = + (vm_object_t)atomic_load_ptr(&m->object)) != NULL) { /* * The page is relocated if and only if it could be * laundered or reclaimed by the page daemon. */ - if (!VM_OBJECT_TRYWLOCK(object)) { - mtx_unlock(m_mtx); - VM_OBJECT_WLOCK(object); - mtx_lock(m_mtx); - if (m->object != object) { - /* - * The page may have been freed. - */ - VM_OBJECT_WUNLOCK(object); - goto retry; - } - } + VM_OBJECT_WLOCK(object); /* Don't care: PG_NODUMP, PG_ZERO. */ - if (object->type != OBJT_DEFAULT && + if (m->object != object || + (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && - object->type != OBJT_VNODE) + object->type != OBJT_VNODE)) error = EINVAL; else if (object->memattr != VM_MEMATTR_DEFAULT) error = EINVAL; @@ -2781,7 +2737,6 @@ retry: * The new page must be deactivated * before the object is unlocked. 
*/ - vm_page_change_lock(m_new, &m_mtx); vm_page_deactivate(m_new); } else { m->flags &= ~PG_ZERO; @@ -2821,8 +2776,6 @@ unlock: error = EINVAL; } } - if (m_mtx != NULL) - mtx_unlock(m_mtx); if ((m = SLIST_FIRST(&free)) != NULL) { int cnt; Modified: head/sys/vm/vm_page.h ============================================================================== --- head/sys/vm/vm_page.h Sat Feb 1 17:54:46 2020 (r357373) +++ head/sys/vm/vm_page.h Sat Feb 1 18:23:51 2020 (r357374) @@ -609,7 +609,6 @@ vm_page_t vm_page_alloc_freelist(int, int); vm_page_t vm_page_alloc_freelist_domain(int, int, int); void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set); bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose); -void vm_page_change_lock(vm_page_t m, struct mtx **mtx); vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int); int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags, vm_page_t *ma, int count);