Date:      Sat, 2 Jun 2018 00:01:07 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r334508 - head/sys/vm
Message-ID:  <201806020001.w52017mQ044135@repo.freebsd.org>

Author: markj
Date: Sat Jun  2 00:01:07 2018
New Revision: 334508
URL: https://svnweb.freebsd.org/changeset/base/334508

Log:
  Remove the "pass" variable from the page daemon control loop.
  
  It serves little purpose after r308474 and r329882.  As a side
  effect, the removal fixes a bug in r329882 which caused the
  page daemon to periodically invoke lowmem handlers even in the
  absence of memory pressure.
  
  Reviewed by:	jeff
  Differential Revision:	https://reviews.freebsd.org/D15491
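
  [Editor's note, not part of the commit message]  For readers following the
  control-flow change, here is a minimal user-space sketch of the behaviour
  after this commit.  It is not kernel code; the names only mirror those in
  vm_pageout.c.  The point is that the lowmem handlers now run only while the
  PID controller reports a free-page shortage, and at most once per
  lowmem_period seconds:

	#include <stdio.h>

	static int lowmem_period = 10;	/* seconds between handler invocations */
	static int lowmem_uptime;	/* last time the handlers ran */
	static int time_uptime;		/* simulated uptime, in seconds */

	static void
	toy_pageout_lowmem(void)
	{

		/* Rate limit, as vm_pageout_lowmem() does for domain 0. */
		if (time_uptime - lowmem_uptime >= lowmem_period) {
			printf("%3ds: invoke vm_lowmem handlers, then uma_reclaim()\n",
			    time_uptime);
			lowmem_uptime = time_uptime;
		}
	}

	int
	main(void)
	{
		int shortage;

		for (time_uptime = 0; time_uptime < 60; time_uptime++) {
			/* Pretend a free-page shortage appears at t=20s. */
			shortage = (time_uptime >= 20) ? 100 : 0;
			if (shortage > 0)	/* the check added by this commit */
				toy_pageout_lowmem();
		}
		return (0);
	}

  Running this prints one line at t=20, 30, 40 and 50 and nothing while
  shortage is zero.  In the pre-commit code shown in the hunks below, "pass"
  was set to 1 on the first shortage and never reset to 0, so the "pass > 0"
  test stayed satisfied and the handlers kept firing every lowmem_period even
  after the pressure was gone.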

Modified:
  head/sys/vm/vm_pageout.c

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Fri Jun  1 23:50:51 2018	(r334507)
+++ head/sys/vm/vm_pageout.c	Sat Jun  2 00:01:07 2018	(r334508)
@@ -1352,7 +1352,7 @@ vm_pageout_reinsert_inactive(struct scan_state *ss, st
  * target.
  */
 static int
-vm_pageout_scan_inactive(struct vm_domain *vmd, int pass, int shortage,
+vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
     int *addl_shortage)
 {
 	struct scan_state ss;
@@ -1366,25 +1366,6 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int pa
 	bool obj_locked;
 
 	/*
-	 * If we need to reclaim memory ask kernel caches to return
-	 * some.  We rate limit to avoid thrashing.
-	 */
-	if (vmd == VM_DOMAIN(0) && pass > 0 &&
-	    (time_uptime - lowmem_uptime) >= lowmem_period) {
-		/*
-		 * Decrease registered cache sizes.
-		 */
-		SDT_PROBE0(vm, , , vm__lowmem_scan);
-		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
-		/*
-		 * We do this explicitly after the caches have been
-		 * drained above.
-		 */
-		uma_reclaim();
-		lowmem_uptime = time_uptime;
-	}
-
-	/*
 	 * The addl_page_shortage is an estimate of the number of temporarily
 	 * stuck pages in the inactive queue.  In other words, the
 	 * number of pages from the inactive count that should be
@@ -1393,16 +1374,13 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int pa
 	addl_page_shortage = 0;
 
 	/*
-	 * Calculate the number of pages that we want to free.  This number
-	 * can be negative if many pages are freed between the wakeup call to
-	 * the page daemon and this calculation.
+	 * vmd_pageout_deficit counts the number of pages requested in
+	 * allocations that failed because of a free page shortage.  We assume
+	 * that the allocations will be reattempted and thus include the deficit
+	 * in our scan target.
 	 */
-	if (pass > 0) {
-		deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
-		page_shortage = shortage + deficit;
-	} else
-		page_shortage = deficit = 0;
-	starting_page_shortage = page_shortage;
+	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
+	starting_page_shortage = page_shortage = shortage + deficit;
 
 	mtx = NULL;
 	obj_locked = false;
@@ -1638,8 +1616,7 @@ reinsert:
 	/*
 	 * Reclaim pages by swapping out idle processes, if configured to do so.
 	 */
-	if (pass > 0)
-		vm_swapout_run_idle();
+	vm_swapout_run_idle();
 
 	/*
 	 * See the description of addl_page_shortage above.
@@ -1870,15 +1847,35 @@ vm_pageout_oom(int shortage)
 }
 
 static void
+vm_pageout_lowmem(struct vm_domain *vmd)
+{
+
+	if (vmd == VM_DOMAIN(0) &&
+	    time_uptime - lowmem_uptime >= lowmem_period) {
+		/*
+		 * Decrease registered cache sizes.
+		 */
+		SDT_PROBE0(vm, , , vm__lowmem_scan);
+		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
+
+		/*
+		 * We do this explicitly after the caches have been
+		 * drained above.
+		 */
+		uma_reclaim();
+		lowmem_uptime = time_uptime;
+	}
+}
+
+static void
 vm_pageout_worker(void *arg)
 {
 	struct vm_domain *vmd;
-	int addl_shortage, domain, pass, shortage;
+	int addl_shortage, domain, shortage;
 	bool target_met;
 
 	domain = (uintptr_t)arg;
 	vmd = VM_DOMAIN(domain);
-	pass = 0;
 	shortage = 0;
 	target_met = true;
 
@@ -1896,6 +1893,7 @@ vm_pageout_worker(void *arg)
 	 */
 	while (TRUE) {
 		vm_domain_pageout_lock(vmd);
+
 		/*
 		 * We need to clear wanted before we check the limits.  This
 		 * prevents races with wakers who will check wanted after they
@@ -1908,12 +1906,12 @@ vm_pageout_worker(void *arg)
 		 */
 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
 			/*
-			 * Yes, the scan failed to free enough pages.  If
-			 * we have performed a level >= 1 (page reclamation)
-			 * scan, then sleep a bit and try again.
+			 * Yes.  If the scan failed to produce enough free
+			 * pages, sleep uninterruptibly for some time in the
+			 * hope that the laundry thread will clean some pages.
 			 */
 			vm_domain_pageout_unlock(vmd);
-			if (pass > 1)
+			if (!target_met)
 				pause("pwait", hz / VM_INACT_SCAN_RATE);
 		} else {
 			/*
@@ -1934,10 +1932,12 @@ vm_pageout_worker(void *arg)
 		 * this interval, and scan the inactive queue.
 		 */
 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
-		if (shortage > 0 && pass == 0)
-			pass = 1;
-		target_met = vm_pageout_scan_inactive(vmd, pass, shortage,
-		    &addl_shortage);
+		if (shortage > 0) {
+			vm_pageout_lowmem(vmd);
+			target_met = vm_pageout_scan_inactive(vmd, shortage,
+			    &addl_shortage);
+		} else
+			addl_shortage = 0;
 
 		/*
 		 * Scan the active queue.  A positive value for shortage
@@ -1946,13 +1946,6 @@ vm_pageout_worker(void *arg)
 		 */
 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
 		vm_pageout_scan_active(vmd, shortage);
-
-		/*
-		 * If the target was not met we must increase the pass to
-		 * more aggressively reclaim.
-		 */
-		if (!target_met)
-			pass++;
 	}
 }
 