From: Konstantin Belousov <kib@FreeBSD.org>
Date: Mon, 7 Dec 2015 11:21:50 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r291935 - stable/10/sys/vm
Message-Id: <201512071121.tB7BLoI6010390@repo.freebsd.org>

Author: kib
Date: Mon Dec 7 11:21:49 2015
New Revision: 291935
URL: https://svnweb.freebsd.org/changeset/base/291935

Log:
  MFC r290920:
  Raise OOM when the pagedaemon is unable to produce a free page in
  several back-to-back passes.
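In outline, the change replaces the old pass/swap-space test with a
progress counter: a scan that starts with a real shortage and frees
nothing increments vmd_oom_seq, any progress resets it, and OOM is
raised only once vm_pageout_oom_seq (default 12) consecutive scans
have failed.  Below is a minimal userland model of that logic; it is
a sketch only, and struct vm_domain_model and mightbe_oom() are
hypothetical stand-ins for the kernel's vm_domain and
vm_pageout_mightbe_oom(), which additionally coordinate an OOM vote
across the pagedaemon domains:

/*
 * Minimal userland model of the heuristic added in this revision.
 * Simplified by assumption: the real code also casts a vote shared
 * by all pagedaemon domains before starting OOM.
 */
#include <stdio.h>

static int vm_pageout_oom_seq = 12;	/* default of vm.pageout_oom_seq */

struct vm_domain_model {
	int vmd_oom_seq;	/* consecutive scans without progress */
};

/*
 * Called after each inactive queue scan with the shortage measured
 * before and after the scan.  Returns 1 when OOM should be raised.
 */
static int
mightbe_oom(struct vm_domain_model *vmd, int page_shortage,
    int starting_page_shortage)
{
	/*
	 * No real target, or at least one page was freed: the scan
	 * made progress, so reset the failure counter.
	 */
	if (starting_page_shortage <= 0 ||
	    starting_page_shortage != page_shortage)
		vmd->vmd_oom_seq = 0;
	else
		vmd->vmd_oom_seq++;
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq)
		return (0);
	/* Reset so OOM is not re-raised until the condition recurs. */
	vmd->vmd_oom_seq = 0;
	return (1);
}

int
main(void)
{
	struct vm_domain_model d = { 0 };
	int scan;

	/* Simulate scans that each want 100 pages and free none. */
	for (scan = 1; scan <= 12; scan++) {
		if (mightbe_oom(&d, 100, 100))
			printf("OOM raised after scan %d\n", scan);
	}
	return (0);
}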
Modified:
  stable/10/sys/vm/vm_page.h
  stable/10/sys/vm/vm_pageout.c

Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/vm/vm_page.h
==============================================================================
--- stable/10/sys/vm/vm_page.h	Mon Dec 7 11:14:57 2015	(r291934)
+++ stable/10/sys/vm/vm_page.h	Mon Dec 7 11:21:49 2015	(r291935)
@@ -227,6 +227,7 @@ struct vm_domain {
 	long vmd_segs;	/* bitmask of the segments */
 	boolean_t vmd_oom;
 	int vmd_pass;	/* local pagedaemon pass */
+	int vmd_oom_seq;
 	int vmd_last_active_scan;
 	struct vm_page vmd_marker; /* marker for pagedaemon private use */
 };

Modified: stable/10/sys/vm/vm_pageout.c
==============================================================================
--- stable/10/sys/vm/vm_pageout.c	Mon Dec 7 11:14:57 2015	(r291934)
+++ stable/10/sys/vm/vm_pageout.c	Mon Dec 7 11:21:49 2015	(r291935)
@@ -121,7 +121,8 @@ static void vm_pageout(void);
 static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
-static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
+static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+    int starting_page_shortage);
 
 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
     NULL);
@@ -158,6 +159,7 @@ int vm_pages_needed;		/* Event on which
 int vm_pageout_deficit;		/* Estimated number of pages deficit */
 int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
 int vm_pageout_wakeup_thresh;
+static int vm_pageout_oom_seq = 12;
 
 #if !defined(NO_SWAPPING)
 static int vm_pageout_req_swapout;	/* XXX */
@@ -217,6 +219,10 @@ static int pageout_lock_miss;
 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, CTLFLAG_RD,
 	&pageout_lock_miss, 0, "vget() lock misses during pageout");
 
+SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
+	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
+	"back-to-back calls to oom detector to start OOM");
+
 #define VM_PAGEOUT_PAGE_COUNT 16
 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
 
@@ -941,7 +947,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
 	long min_scan;
 	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
 	int vnodes_skipped = 0;
-	int maxlaunder, scan_tick, scanned;
+	int maxlaunder, scan_tick, scanned, starting_page_shortage;
 	int lockmode;
 	boolean_t queues_locked;
 
@@ -981,6 +987,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
 		page_shortage = vm_paging_target() + deficit;
 	} else
 		page_shortage = deficit = 0;
+	starting_page_shortage = page_shortage;
 
 	/*
 	 * maxlaunder limits the number of dirty pages we flush per scan.
@@ -1358,6 +1365,12 @@ relock_queues:
 		(void)speedup_syncer();
 
 	/*
+	 * If the inactive queue scan fails repeatedly to meet its
+	 * target, kill the largest process.
+	 */
+	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
+
+	/*
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
@@ -1469,15 +1482,6 @@ relock_queues:
 		}
 	}
 #endif
-
-	/*
-	 * If we are critically low on one of RAM or swap and low on
-	 * the other, kill the largest process.  However, we avoid
-	 * doing this on the first pass in order to give ourselves a
-	 * chance to flush out dirty vnode-backed pages and to allow
-	 * active pages to be moved to the inactive queue and reclaimed.
-	 */
-	vm_pageout_mightbe_oom(vmd, pass);
 }
 
 static int vm_pageout_oom_vote;
@@ -1488,12 +1492,17 @@ static int vm_pageout_oom_vote;
  * failed to reach free target is premature.
  */
 static void
-vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
+vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+    int starting_page_shortage)
 {
 	int old_vote;
 
-	if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
-	    (swap_pager_full && vm_paging_target() > 0))) {
+	if (starting_page_shortage <= 0 || starting_page_shortage !=
+	    page_shortage)
+		vmd->vmd_oom_seq = 0;
+	else
+		vmd->vmd_oom_seq++;
+	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
 		if (vmd->vmd_oom) {
 			vmd->vmd_oom = FALSE;
 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
@@ -1501,6 +1510,12 @@ vm_pageout_mightbe_oom(struct vm_domain
 		return;
 	}
 
+	/*
+	 * Do not follow the call sequence until OOM condition is
+	 * cleared.
+	 */
+	vmd->vmd_oom_seq = 0;
+
 	if (vmd->vmd_oom)
 		return;
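The threshold is exported as the read-write sysctl vm.pageout_oom_seq,
so the trigger can be made more or less patient at runtime.  A
hypothetical userland sketch using sysctlbyname(3) to read the current
value and raise it follows; the new value 120 is an arbitrary
illustration, not a recommendation, and setting it requires root:

/*
 * Hypothetical example: read and raise the vm.pageout_oom_seq
 * tunable added by this commit, via sysctlbyname(3) on FreeBSD.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int oldval, newval = 120;	/* arbitrary illustrative value */
	size_t oldlen = sizeof(oldval);

	if (sysctlbyname("vm.pageout_oom_seq", &oldval, &oldlen,
	    &newval, sizeof(newval)) == -1)
		err(1, "sysctlbyname(vm.pageout_oom_seq)");
	printf("vm.pageout_oom_seq: %d -> %d\n", oldval, newval);
	return (0);
}

A larger value requires more consecutive no-progress scans before the
pagedaemon starts killing processes; a smaller one makes the OOM
killer fire sooner.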