Date:      Mon, 16 Jul 2012 18:13:43 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r238536 - head/sys/vm
Message-ID:  <201207161813.q6GIDhm1068181@svn.freebsd.org>

Author: alc
Date: Mon Jul 16 18:13:43 2012
New Revision: 238536
URL: http://svn.freebsd.org/changeset/base/238536

Log:
  Various improvements to vm_contig_grow_cache().  Most notably, even when
  it can't sleep, it can still move clean pages from the inactive queue to
  the cache.  Also, when a page is cached, there is no need to restart the
  scan.  The "next" page pointer held by vm_contig_launder() is still
  valid.  Finally, add a comment summarizing what vm_contig_grow_cache()
  does based upon the value of "tries".
  
  MFC after:	3 weeks
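
For context, vm_contig_grow_cache() is called by contiguous-memory
allocators after a failed allocation attempt.  The sketch below is not part
of this change; it is a hypothetical illustration of the usual caller
pattern, escalating "tries" on each pass.  try_alloc_contig() is a made-up
placeholder for the real allocation path:

	/*
	 * Hypothetical caller: retry a contiguous allocation, asking
	 * vm_contig_grow_cache() to reclaim pages between attempts.
	 * A tries value of 0 must not sleep, so only clean, inactive
	 * pages are cached; later passes may launder dirty pages and
	 * sleep.
	 */
	static vm_page_t
	alloc_contig_retry(vm_paddr_t low, vm_paddr_t high, int can_sleep)
	{
		vm_page_t m;
		int tries, max_tries;

		max_tries = can_sleep ? 3 : 1;
		for (tries = 0;; tries++) {
			m = try_alloc_contig(low, high);	/* placeholder */
			if (m != NULL || tries >= max_tries)
				return (m);
			vm_contig_grow_cache(tries, low, high);
		}
	}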

Modified:
  head/sys/vm/vm_contig.c

Modified: head/sys/vm/vm_contig.c
==============================================================================
--- head/sys/vm/vm_contig.c	Mon Jul 16 17:48:43 2012	(r238535)
+++ head/sys/vm/vm_contig.c	Mon Jul 16 18:13:43 2012	(r238536)
@@ -83,7 +83,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_pager.h>
 
 static int
-vm_contig_launder_page(vm_page_t m, vm_page_t *next)
+vm_contig_launder_page(vm_page_t m, vm_page_t *next, int tries)
 {
 	vm_object_t object;
 	vm_page_t m_tmp;
@@ -92,7 +92,10 @@ vm_contig_launder_page(vm_page_t m, vm_p
 	int vfslocked;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	vm_page_lock_assert(m, MA_OWNED);
+	if (!vm_pageout_page_lock(m, next) || m->hold_count != 0) {
+		vm_page_unlock(m);
+		return (EAGAIN);
+	}
 	object = m->object;
 	if (!VM_OBJECT_TRYLOCK(object) &&
 	    (!vm_pageout_fallback_object_lock(m, next) || m->hold_count != 0)) {
@@ -100,7 +103,13 @@ vm_contig_launder_page(vm_page_t m, vm_p
 		VM_OBJECT_UNLOCK(object);
 		return (EAGAIN);
 	}
-	if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
+	if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
+		if (tries == 0) {
+			vm_page_unlock(m);
+			VM_OBJECT_UNLOCK(object);
+			return (EAGAIN);
+		}
+		vm_page_sleep(m, "vpctw0");
 		VM_OBJECT_UNLOCK(object);
 		vm_page_lock_queues();
 		return (EBUSY);
@@ -110,7 +119,7 @@ vm_contig_launder_page(vm_page_t m, vm_p
 		pmap_remove_all(m);
 	if (m->dirty != 0) {
 		vm_page_unlock(m);
-		if ((object->flags & OBJ_DEAD) != 0) {
+		if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
 			VM_OBJECT_UNLOCK(object);
 			return (EAGAIN);
 		}
@@ -146,34 +155,25 @@ vm_contig_launder_page(vm_page_t m, vm_p
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
-	return (0);
+	return (EAGAIN);
 }
 
 static int
-vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
+vm_contig_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 {
 	vm_page_t m, next;
 	vm_paddr_t pa;
 	int error;
 
 	TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
-
-		/* Skip marker pages */
+		KASSERT(m->queue == queue,
+		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
 		if ((m->flags & PG_MARKER) != 0)
 			continue;
-
 		pa = VM_PAGE_TO_PHYS(m);
 		if (pa < low || pa + PAGE_SIZE > high)
 			continue;
-
-		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
-			vm_page_unlock(m);
-			continue;
-		}
-		KASSERT(m->queue == queue,
-		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
-		error = vm_contig_launder_page(m, &next);
-		vm_page_lock_assert(m, MA_NOTOWNED);
+		error = vm_contig_launder_page(m, &next, tries);
 		if (error == 0)
 			return (TRUE);
 		if (error == EBUSY)
@@ -183,7 +183,15 @@ vm_contig_launder(int queue, vm_paddr_t 
 }
 
 /*
- * Increase the number of cached pages.
+ * Increase the number of cached pages.  The specified value, "tries",
+ * determines which categories of pages are cached:
+ *
+ *  0: All clean, inactive pages within the specified physical address range
+ *     are cached.  Will not sleep.
+ *  1: The vm_lowmem handlers are called.  All inactive pages within
+ *     the specified physical address range are cached.  May sleep.
+ *  2: The vm_lowmem handlers are called.  All inactive and active pages
+ *     within the specified physical address range are cached.  May sleep.
  */
 void
 vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
@@ -206,15 +214,16 @@ vm_contig_grow_cache(int tries, vm_paddr
 	}
 	vm_page_lock_queues();
 	inactl = 0;
-	inactmax = tries < 1 ? 0 : cnt.v_inactive_count;
+	inactmax = cnt.v_inactive_count;
 	actl = 0;
 	actmax = tries < 2 ? 0 : cnt.v_active_count;
 again:
-	if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, low, high)) {
+	if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, tries, low,
+	    high)) {
 		inactl++;
 		goto again;
 	}
-	if (actl < actmax && vm_contig_launder(PQ_ACTIVE, low, high)) {
+	if (actl < actmax && vm_contig_launder(PQ_ACTIVE, tries, low, high)) {
 		actl++;
 		goto again;
 	}
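
The policy selected by "tries", as documented in the comment added above,
can be summarized by the following rough model.  This is an illustration
only, not code from the tree; is_busy(), is_dirty(), cache_page(), and
launder_page() are hypothetical placeholders for the real page-handling
steps, and all locking is omitted:

	/*
	 * Rough per-page model of the "tries" policy:
	 *   tries == 0: cache clean, inactive pages only; never block.
	 *   tries >= 1: also launder dirty pages, sleeping if needed.
	 *   tries == 2: the caller additionally scans the active queue
	 *               (via actmax in vm_contig_grow_cache()).
	 */
	static int
	contig_page_policy(vm_page_t m, int tries)
	{

		if (is_busy(m)) {
			if (tries == 0)
				return (EAGAIN);	/* cannot sleep: skip it */
			return (EBUSY);			/* sleep; caller rescans */
		}
		if (is_dirty(m)) {
			if (tries == 0)
				return (EAGAIN);	/* cannot sleep: skip it */
			return (launder_page(m));	/* page out; may sleep */
		}
		cache_page(m);				/* clean: move to the cache */
		return (EAGAIN);			/* continue; "next" stays valid */
	}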
