Date:      Mon, 25 Aug 2014 21:16:57 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r270628 - stable/10/sys/vm
Message-ID:  <201408252116.s7PLGvOV039666@svn.freebsd.org>

Author: kib
Date: Mon Aug 25 21:16:57 2014
New Revision: 270628
URL: http://svnweb.freebsd.org/changeset/base/270628

Log:
  MFC r261412 (by alc):
  Make prefaulting more aggressive on hard faults.

Modified:
  stable/10/sys/vm/vm_fault.c
Directory Properties:
  stable/10/   (props changed)
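
The log's "more aggressive on hard faults" refers to how the prefault
window is sized in the patch below: when the pager has just read
"faultcount" pages with the requested page at index "reqpage", the
window covers everything that was read instead of the fixed
PFBAK/PFFOR window of four pages on each side.  The following is a
minimal user-space sketch of that window computation, not part of the
commit; prefault_window() and the demo main() are hypothetical names
used only for illustration.

#include <stdio.h>

#define PFBAK 4
#define PFFOR 4

static void
prefault_window(int faultcount, int reqpage, int *backward, int *forward)
{

        if (faultcount > 0) {
                /* Hard fault: cover the pages the pager just read. */
                *backward = reqpage;
                *forward = faultcount - reqpage - 1;
        } else {
                /* Soft fault: fall back to the old fixed window. */
                *backward = PFBAK;
                *forward = PFFOR;
        }
}

int
main(void)
{
        int backward, forward;

        /* Hard fault: the pager read 16 pages, requested page at index 8. */
        prefault_window(16, 8, &backward, &forward);
        printf("hard fault: backward=%d forward=%d\n", backward, forward);

        /* Soft fault: nothing was read; the old 4/4 window is used. */
        prefault_window(0, 0, &backward, &forward);
        printf("soft fault: backward=%d forward=%d\n", backward, forward);
        return (0);
}

With the old code both cases would have used the fixed 4/4 window; a
hard fault that read 16 pages can now prefault across all of them.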

Modified: stable/10/sys/vm/vm_fault.c
==============================================================================
--- stable/10/sys/vm/vm_fault.c	Mon Aug 25 20:49:25 2014	(r270627)
+++ stable/10/sys/vm/vm_fault.c	Mon Aug 25 21:16:57 2014	(r270628)
@@ -104,17 +104,8 @@ __FBSDID("$FreeBSD$");
 
 #define PFBAK 4
 #define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int prefault_pageorder[] = {
-	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
-	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
-	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
-	-4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
 
 static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
-static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
 
 #define	VM_FAULT_READ_BEHIND	8
 #define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)
@@ -136,6 +127,8 @@ struct faultstate {
 };
 
 static void vm_fault_cache_behind(const struct faultstate *fs, int distance);
+static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+	    int faultcount, int reqpage);
 
 static inline void
 release_page(struct faultstate *fs)
@@ -911,7 +904,7 @@ vnode_locked:
 	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
 	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
 	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
-		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
+		vm_fault_prefault(&fs, vaddr, faultcount, reqpage);
 	VM_OBJECT_WLOCK(fs.object);
 	vm_page_lock(fs.m);
 
@@ -1006,31 +999,49 @@ vm_fault_cache_behind(const struct fault
  * of mmap time.
  */
 static void
-vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+    int faultcount, int reqpage)
 {
-	int i;
+	pmap_t pmap;
+	vm_map_entry_t entry;
+	vm_object_t backing_object, lobject;
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
 	vm_page_t m;
-	vm_object_t object;
+	int backward, forward, i;
 
+	pmap = fs->map->pmap;
 	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
 		return;
 
-	object = entry->object.vm_object;
+	if (faultcount > 0) {
+		backward = reqpage;
+		forward = faultcount - reqpage - 1;
+	} else {
+		backward = PFBAK;
+		forward = PFFOR;
+	}
+	entry = fs->entry;
 
-	starta = addra - PFBAK * PAGE_SIZE;
+	starta = addra - backward * PAGE_SIZE;
 	if (starta < entry->start) {
 		starta = entry->start;
 	} else if (starta > addra) {
 		starta = 0;
 	}
 
-	for (i = 0; i < PAGEORDER_SIZE; i++) {
-		vm_object_t backing_object, lobject;
-
-		addr = addra + prefault_pageorder[i];
-		if (addr > addra + (PFFOR * PAGE_SIZE))
+	/*
+	 * Generate the sequence of virtual addresses that are candidates for
+	 * prefaulting in an outward spiral from the faulting virtual address,
+	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
+	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
+	 * If the candidate address doesn't have a backing physical page, then
+	 * the loop immediately terminates.
+	 */
+	for (i = 0; i < 2 * imax(backward, forward); i++) {
+		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
+		    PAGE_SIZE);
+		if (addr > addra + forward * PAGE_SIZE)
 			addr = 0;
 
 		if (addr < starta || addr >= entry->end)
@@ -1040,7 +1051,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset
 			continue;
 
 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
-		lobject = object;
+		lobject = entry->object.vm_object;
 		VM_OBJECT_RLOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
 		    lobject->type == OBJT_DEFAULT &&
@@ -1052,9 +1063,6 @@ vm_fault_prefault(pmap_t pmap, vm_offset
 			VM_OBJECT_RUNLOCK(lobject);
 			lobject = backing_object;
 		}
-		/*
-		 * give-up when a page is not in memory
-		 */
 		if (m == NULL) {
 			VM_OBJECT_RUNLOCK(lobject);
 			break;
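
The comment added above documents the loop that now generates prefault
candidates in an outward spiral instead of indexing the removed
prefault_pageorder[] table.  Below is a small user-space sketch, not
part of the commit, that only prints the candidate addresses the new
expression produces; PAGE_SIZE is assumed to be 4096 and the faulting
address and window sizes are made-up values for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096L

int
main(void)
{
        unsigned long addra = 0x100000UL;       /* hypothetical faulting address */
        unsigned long addr;
        long backward = 3, forward = 2;         /* made-up window sizes */
        int i;

        for (i = 0; i < 2 * (backward > forward ? backward : forward); i++) {
                /* i = 0, 1, 2, 3, ... maps to -1, +1, -2, +2, ... pages. */
                addr = addra + ((i >> 1) + 1) *
                    ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE);
                if (addr > addra + forward * PAGE_SIZE)
                        addr = 0;       /* past the forward window; skipped */
                printf("i=%d candidate=%#lx\n", i, addr);
        }
        return (0);
}

The ordering matches the old prefault_pageorder[] table, but the window
is no longer limited to four pages on each side, and candidates beyond
the forward window are zeroed so the range check that follows in
vm_fault_prefault() skips them.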


