Date: Tue, 1 Sep 2015 06:21:12 +0000 (UTC)
From: Alan Cox <alc@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r287344 - head/sys/vm
Message-ID: <201509010621.t816LCZ4034925@repo.freebsd.org>
Author: alc
Date: Tue Sep  1 06:21:12 2015
New Revision: 287344
URL: https://svnweb.freebsd.org/changeset/base/287344

Log:
  Handle held pages earlier in the inactive queue scan.

  Reviewed by:	kib
  Sponsored by:	EMC / Isilon Storage Division

Modified:
  head/sys/vm/vm_pageout.c

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Tue Sep  1 06:05:43 2015	(r287343)
+++ head/sys/vm/vm_pageout.c	Tue Sep  1 06:21:12 2015	(r287344)
@@ -1125,31 +1125,45 @@ vm_pageout_scan(struct vm_domain *vmd, i
 		 * different position within the queue.  In either
 		 * case, addl_page_shortage should not be incremented.
 		 */
-		if (!vm_pageout_page_lock(m, &next)) {
-			vm_page_unlock(m);
-			continue;
+		if (!vm_pageout_page_lock(m, &next))
+			goto unlock_page;
+		else if (m->hold_count != 0) {
+			/*
+			 * Held pages are essentially stuck in the
+			 * queue.  So, they ought to be discounted
+			 * from the inactive count.  See the
+			 * calculation of the page_shortage for the
+			 * loop over the active queue below.
+			 */
+			addl_page_shortage++;
+			goto unlock_page;
 		}
 		object = m->object;
-		if (!VM_OBJECT_TRYWLOCK(object) &&
-		    !vm_pageout_fallback_object_lock(m, &next)) {
-			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(object);
-			continue;
+		if (!VM_OBJECT_TRYWLOCK(object)) {
+			if (!vm_pageout_fallback_object_lock(m, &next))
+				goto unlock_object;
+			else if (m->hold_count != 0) {
+				addl_page_shortage++;
+				goto unlock_object;
+			}
 		}
-
-		/*
-		 * Don't mess with busy pages, keep them at at the
-		 * front of the queue, most likely they are being
-		 * paged out.  Increment addl_page_shortage for busy
-		 * pages, because they may leave the inactive queue
-		 * shortly after page scan is finished.
-		 */
 		if (vm_page_busied(m)) {
-			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(object);
+			/*
+			 * Don't mess with busy pages.  Leave them at
+			 * the front of the queue.  Most likely, they
+			 * are being paged out and will leave the
+			 * queue shortly after the scan finishes.  So,
+			 * they ought to be discounted from the
+			 * inactive count.
+			 */
 			addl_page_shortage++;
+unlock_object:
+			VM_OBJECT_WUNLOCK(object);
+unlock_page:
+			vm_page_unlock(m);
 			continue;
 		}
+		KASSERT(m->hold_count == 0, ("Held page %p", m));
 
 		/*
 		 * We unlock the inactive page queue, invalidating the
@@ -1164,7 +1178,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
 		 * Invalid pages can be easily freed.  They cannot be
 		 * mapped, vm_page_free() asserts this.
 		 */
-		if (m->valid == 0 && m->hold_count == 0) {
+		if (m->valid == 0) {
 			vm_page_free(m);
 			PCPU_INC(cnt.v_dfree);
 			--page_shortage;
@@ -1208,18 +1222,6 @@ vm_pageout_scan(struct vm_domain *vmd, i
 			goto drop_page;
 		}
 
-		if (m->hold_count != 0) {
-			/*
-			 * Held pages are essentially stuck in the
-			 * queue.  So, they ought to be discounted
-			 * from the inactive count.  See the
-			 * calculation of the page_shortage for the
-			 * loop over the active queue below.
-			 */
-			addl_page_shortage++;
-			goto drop_page;
-		}
-
 		/*
 		 * If the page appears to be clean at the machine-independent
 		 * layer, then remove all of its mappings from the pmap in
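For readers who want the shape of the change without stepping through the diff: the sketch below is a compact, self-contained illustration of the control flow this revision introduces, not FreeBSD code. Every type and helper in it (page_stub, page_lock_stub, object_lock_stub, scan_one_page) is a made-up stand-in; only the ordering of the checks and the shared unlock_object/unlock_page unwind mirror the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct vm_page; only the fields the sketch needs. */
struct page_stub {
	int	hold_count;	/* nonzero: page is held */
	bool	busied;		/* stand-in for vm_page_busied(m) */
	bool	page_locked;
	bool	object_locked;
};

/* Stand-in for vm_pageout_page_lock(); always succeeds here. */
static bool
page_lock_stub(struct page_stub *m)
{
	m->page_locked = true;
	return (true);
}

/* Stand-in for VM_OBJECT_TRYWLOCK() plus the fallback lock path. */
static bool
object_lock_stub(struct page_stub *m)
{
	m->object_locked = true;
	return (true);
}

/*
 * Returns true when the page is skipped (held or busy).  Mirrors the
 * goto-based unwind added in r287344: one exit path drops whatever
 * locks were taken, and held/busy pages bump addl_page_shortage
 * before reaching it.
 */
static bool
scan_one_page(struct page_stub *m, int *addl_page_shortage)
{
	if (!page_lock_stub(m))
		goto unlock_page;
	else if (m->hold_count != 0) {
		/* Held pages are stuck in the queue; discount them. */
		(*addl_page_shortage)++;
		goto unlock_page;
	}
	if (!object_lock_stub(m))
		goto unlock_object;
	else if (m->hold_count != 0) {
		/*
		 * Simplified: the real code only re-checks hold_count
		 * after the fallback lock path, which may sleep.
		 */
		(*addl_page_shortage)++;
		goto unlock_object;
	}
	if (m->busied) {
		/* Busy pages stay at the front of the queue; discount them. */
		(*addl_page_shortage)++;
unlock_object:
		m->object_locked = false;
unlock_page:
		m->page_locked = false;
		return (true);
	}
	/* ...the rest of the scan (laundering, freeing, etc.) goes here... */
	m->object_locked = false;
	m->page_locked = false;
	return (false);
}

int
main(void)
{
	struct page_stub held = { .hold_count = 1 };
	struct page_stub busy = { .busied = true };
	int addl_page_shortage = 0;

	scan_one_page(&held, &addl_page_shortage);
	scan_one_page(&busy, &addl_page_shortage);
	printf("addl_page_shortage = %d\n", addl_page_shortage);	/* prints 2 */
	return (0);
}

Moving the hold_count test ahead of the object lock and busy checks is what permits the new KASSERT(m->hold_count == 0, ...) and lets the m->valid == 0 free path drop its hold_count condition, as the second and third hunks of the diff show.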