Date:      Tue, 19 Nov 2019 08:06:31 +0000 (UTC)
From:      Doug Moore <dougm@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r354850 - head/sys/vm
Message-ID:  <201911190806.xAJ86VGJ028135@repo.freebsd.org>

Author: dougm
Date: Tue Nov 19 08:06:31 2019
New Revision: 354850
URL: https://svnweb.freebsd.org/changeset/base/354850

Log:
  Drop the extra argument from swp_pager_meta_ctl and have it do lookup
  only.  Rename it swp_pager_meta_lookup.  Stop checking for obj->type
  == OBJT_SWAP there and assert it instead.  Make the caller responsible
  for the obj->type check.
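  
  In outline, a caller that may be handed a non-swap object now makes the
  type check itself before calling the lookup-only helper (an illustrative
  sketch of the new contract; object and pindex stand in for the caller's
  own variables, as in swp_pager_xfer_source in the diff below):
  
      /* Check the object type first; the helper only does the lookup. */
      if (object->type == OBJT_SWAP &&
          swp_pager_meta_lookup(object, pindex) != SWAPBLK_NONE) {
              /* pindex is backed by a swap block. */
      }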
  
  Move the swp_pager_meta_ctl 'pop' functionality to swap_pager_unswapped,
  the only place that uses it, and assume obj->type == OBJT_SWAP there too.
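  
  In outline, the former SWM_POP path now lives inline in
  swap_pager_unswapped (a condensed sketch; the two early returns are
  folded into one test here, the full function is in the diff below):
  
      /* Look up the swblk covering the page and drop its swap block. */
      sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
          rounddown(m->pindex, SWAP_META_PAGES));
      if (sb == NULL || sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE)
              return;
      swp_pager_freeswapspace(sb->d[m->pindex % SWAP_META_PAGES], 1);
      sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
      swp_pager_free_empty_swblk(m->object, sb);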
  
  Assisted by: ota_j.email.ne.jp
  Reviewed by: kib
  Tested by: pho
  Differential Revision: https://reviews.freebsd.org/D22437

Modified:
  head/sys/vm/swap_pager.c

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Tue Nov 19 07:20:59 2019	(r354849)
+++ head/sys/vm/swap_pager.c	Tue Nov 19 08:06:31 2019	(r354850)
@@ -321,8 +321,6 @@ swap_release_by_cred(vm_ooffset_t decr, struct ucred *
 #endif
 }
 
-#define SWM_POP		0x01	/* pop out			*/
-
 static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
 static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
@@ -426,7 +424,7 @@ static void swp_pager_meta_free(vm_object_t, vm_pindex
 static void swp_pager_meta_transfer(vm_object_t src, vm_object_t dst,
     vm_pindex_t pindex, vm_pindex_t count);
 static void swp_pager_meta_free_all(vm_object_t);
-static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
+static daddr_t swp_pager_meta_lookup(vm_object_t, vm_pindex_t);
 
 static void
 swp_pager_init_freerange(daddr_t *start, daddr_t *num)
@@ -942,7 +940,10 @@ swp_pager_xfer_source(vm_object_t srcobject, vm_object
 {
 	daddr_t dstaddr;
 
-	if (swp_pager_meta_ctl(dstobject, pindex, 0) != SWAPBLK_NONE) {
+	KASSERT(srcobject->type == OBJT_SWAP,
+	    ("%s: Srcobject not swappable", __func__));
+	if (dstobject->type == OBJT_SWAP &&
+	    swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
 		/* Caller should destroy the source block. */
 		return (false);
 	}
@@ -1050,11 +1051,13 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pin
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
+	KASSERT(object->type == OBJT_SWAP,
+	    ("%s: object not swappable", __func__));
 
 	/*
 	 * do we have good backing store at the requested index ?
 	 */
-	blk0 = swp_pager_meta_ctl(object, pindex, 0);
+	blk0 = swp_pager_meta_lookup(object, pindex);
 	if (blk0 == SWAPBLK_NONE) {
 		if (before)
 			*before = 0;
@@ -1070,7 +1073,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pin
 		for (i = 1; i < SWB_NPAGES; i++) {
 			if (i > pindex)
 				break;
-			blk = swp_pager_meta_ctl(object, pindex - i, 0);
+			blk = swp_pager_meta_lookup(object, pindex - i);
 			if (blk != blk0 - i)
 				break;
 		}
@@ -1082,7 +1085,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pin
 	 */
 	if (after != NULL) {
 		for (i = 1; i < SWB_NPAGES; i++) {
-			blk = swp_pager_meta_ctl(object, pindex + i, 0);
+			blk = swp_pager_meta_lookup(object, pindex + i);
 			if (blk != blk0 + i)
 				break;
 		}
@@ -1113,11 +1116,26 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pin
 static void
 swap_pager_unswapped(vm_page_t m)
 {
-	daddr_t srcaddr;
+	struct swblk *sb;
 
-	srcaddr = swp_pager_meta_ctl(m->object, m->pindex, SWM_POP);
-	if (srcaddr != SWAPBLK_NONE)
-		swp_pager_freeswapspace(srcaddr, 1);
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+	/*
+	 * The meta data only exists if the object is OBJT_SWAP
+	 * and even then might not be allocated yet.
+	 */
+	KASSERT(m->object->type == OBJT_SWAP,
+	    ("Free object not swappable"));
+
+	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
+	    rounddown(m->pindex, SWAP_META_PAGES));
+	if (sb == NULL)
+		return;
+	if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE)
+		return;
+	swp_pager_freeswapspace(sb->d[m->pindex % SWAP_META_PAGES], 1);
+	sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
+	swp_pager_free_empty_swblk(m->object, sb);
 }
 
 /*
@@ -1152,6 +1170,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *ma,
 	 * and then this function incorrectly recreates those pages as
 	 * read-behind pages in the current object.
 	 */
+	KASSERT(object->type == OBJT_SWAP,
+	    ("%s: object not swappable", __func__));
 	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
 		return (VM_PAGER_FAIL);
 
@@ -1211,7 +1231,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *ma,
 	vm_object_pip_add(object, count);
 
 	pindex = bm->pindex;
-	blk = swp_pager_meta_ctl(object, pindex, 0);
+	blk = swp_pager_meta_lookup(object, pindex);
 	KASSERT(blk != SWAPBLK_NONE,
 	    ("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex));
 
@@ -1688,6 +1708,8 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t
 	KASSERT(npages > 0, ("%s: No pages", __func__));
 	KASSERT(npages <= MAXPHYS / PAGE_SIZE,
 	    ("%s: Too many pages: %d", __func__, npages));
+	KASSERT(object->type == OBJT_SWAP,
+	    ("%s: Object not swappable", __func__));
 	vm_object_pip_add(object, npages);
 	vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
 	for (i = j = 0;; i++) {
@@ -1725,6 +1747,8 @@ swap_pager_swapoff_object(struct swdevt *sp, vm_object
 	daddr_t blk, n_blks, s_blk;
 	int i;
 
+	KASSERT(object->type == OBJT_SWAP,
+	    ("%s: Object not swappable", __func__));
 	n_blks = 0;
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
@@ -2123,39 +2147,26 @@ swp_pager_meta_free_all(vm_object_t object)
  *	When acting on a busy resident page and paging is in progress, we
  *	have to wait until paging is complete but otherwise can act on the
  *	busy page.
- *
- *	SWM_POP		remove from meta data but do not free it
  */
 static daddr_t
-swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
+swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 	struct swblk *sb;
-	daddr_t r1;
 
-	if ((flags & SWM_POP) != 0)
-		VM_OBJECT_ASSERT_WLOCKED(object);
-	else
-		VM_OBJECT_ASSERT_LOCKED(object);
+	VM_OBJECT_ASSERT_LOCKED(object);
 
 	/*
 	 * The meta data only exists if the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
 	 */
-	if (object->type != OBJT_SWAP)
-		return (SWAPBLK_NONE);
+	KASSERT(object->type == OBJT_SWAP,
+	    ("Lookup object not swappable"));
 
 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 	    rounddown(pindex, SWAP_META_PAGES));
 	if (sb == NULL)
 		return (SWAPBLK_NONE);
-	r1 = sb->d[pindex % SWAP_META_PAGES];
-	if (r1 == SWAPBLK_NONE)
-		return (SWAPBLK_NONE);
-	if ((flags & SWM_POP) != 0) {
-		sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
-		swp_pager_free_empty_swblk(object, sb);
-	}
-	return (r1);
+	return (sb->d[pindex % SWAP_META_PAGES]);
 }
 
 /*


