Date:      Wed, 1 Nov 2017 18:06:45 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r325285 - head/sys/amd64/amd64
Message-ID:  <201711011806.vA1I6jfs043658@repo.freebsd.org>

Author: kib
Date: Wed Nov  1 18:06:44 2017
New Revision: 325285
URL: https://svnweb.freebsd.org/changeset/base/325285

Log:
  Restore an optimization that was temporarily disabled by r324665.
  
  In reclaim_pv_chunk(), rotate the pv chunks list so that subsequent
  invocations of the reclaim do not scan the same pv chunks that could
  not be freed.  Only do the rotation when there is no parallel scan,
  as tracked by the active_reclaims counter.
  
  To rotate, move all chunks that are before the current iteration
  marker to a position after another marker that is inserted at the
  list tail at the start of the reclaim.
  
  Reviewed by:	alc
  Tested by:	pho (previous version)
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week
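
For readers unfamiliar with the marker technique used here, the sketch
below shows the two-marker scan and rotation in isolation.  It is not
the pmap code: the chunk type, its fields, and scan_chunks() are
hypothetical stand-ins, markers are identified by a NULL owner (as
pc_pmap == NULL identifies them in pmap.c), and the pv_chunks_mutex
locking, delayed invalidation, and page freeing of the real function
are omitted.  A runnable demonstration of just the rotation step
follows the diff below.

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct chunk {
	TAILQ_ENTRY(chunk) link;
	const char *owner;	/* NULL identifies a marker, like pc_pmap */
	bool freeable;		/* stand-in for "no wired/superpage mappings" */
};

static TAILQ_HEAD(, chunk) chunks = TAILQ_HEAD_INITIALIZER(chunks);
static int active_reclaims;

void
scan_chunks(void)
{
	struct chunk marker = { .owner = NULL };
	struct chunk marker_end = { .owner = NULL };
	struct chunk *c;

	active_reclaims++;
	TAILQ_INSERT_HEAD(&chunks, &marker, link);
	TAILQ_INSERT_TAIL(&chunks, &marker_end, link);
	while ((c = TAILQ_NEXT(&marker, link)) != &marker_end) {
		if (c->owner == NULL)	/* another scan's marker */
			goto next_chunk;
		if (c->freeable) {
			/* A real reclaim would free the chunk here. */
			printf("reclaimed a chunk of %s\n", c->owner);
			TAILQ_REMOVE(&chunks, c, link);
			break;
		}
next_chunk:
		/* Step the head marker past the chunk just examined. */
		TAILQ_REMOVE(&chunks, &marker, link);
		TAILQ_INSERT_AFTER(&chunks, c, &marker, link);
		if (active_reclaims == 1) {
			/*
			 * Rotate: everything in front of the head marker
			 * was examined and kept; move it behind the tail
			 * marker so the next scan sees other chunks first.
			 */
			while ((c = TAILQ_FIRST(&chunks)) != &marker) {
				TAILQ_REMOVE(&chunks, c, link);
				TAILQ_INSERT_TAIL(&chunks, c, link);
			}
		}
	}
	TAILQ_REMOVE(&chunks, &marker, link);
	TAILQ_REMOVE(&chunks, &marker_end, link);
	active_reclaims--;
}

Because the examined-but-kept chunks end up behind the tail marker, a
scan that frees nothing still changes where the next scan starts, which
is the behavior r324665 had temporarily lost.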

Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Wed Nov  1 16:32:11 2017	(r325284)
+++ head/sys/amd64/amd64/pmap.c	Wed Nov  1 18:06:44 2017	(r325285)
@@ -2892,8 +2892,8 @@ reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked
 static vm_page_t
 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 {
-	struct pv_chunk *pc, *pc_marker;
-	struct pv_chunk_header pc_marker_b;
+	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
+	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
 	struct md_page *pvh;
 	pd_entry_t *pde;
 	pmap_t next_pmap, pmap;
@@ -2906,6 +2906,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	uint64_t inuse;
 	int bit, field, freed;
 	bool start_di;
+	static int active_reclaims = 0;
 
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
@@ -2914,7 +2915,9 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	PG_G = PG_A = PG_M = PG_RW = 0;
 	SLIST_INIT(&free);
 	bzero(&pc_marker_b, sizeof(pc_marker_b));
+	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
 	pc_marker = (struct pv_chunk *)&pc_marker_b;
+	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
 
 	/*
 	 * A delayed invalidation block should already be active if
@@ -2924,12 +2927,21 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	start_di = pmap_not_in_di();
 
 	mtx_lock(&pv_chunks_mutex);
+	active_reclaims++;
 	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
-	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != NULL &&
+	TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
+	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
 	    SLIST_EMPTY(&free)) {
 		next_pmap = pc->pc_pmap;
-		if (next_pmap == NULL)		/* marker */
+		if (next_pmap == NULL) {
+			/*
+			 * The next chunk is a marker.  However, it is
+			 * not our marker, so active_reclaims must be
+			 * > 1.  Consequently, the next_chunk code
+			 * will not rotate the pv_chunks list.
+			 */
 			goto next_chunk;
+		}
 		mtx_unlock(&pv_chunks_mutex);
 
 		/*
@@ -3043,8 +3055,24 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 next_chunk:
 		TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
 		TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
+		if (active_reclaims == 1 && pmap != NULL) {
+			/*
+			 * Rotate the pv chunks list so that we do not
+			 * scan the same pv chunks that could not be
+			 * freed (because they contained a wired
+			 * and/or superpage mapping) on every
+			 * invocation of reclaim_pv_chunk().
+			 */
+			while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
+				MPASS(pc->pc_pmap != NULL);
+				TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+				TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+			}
+		}
 	}
 	TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
+	TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
+	active_reclaims--;
 	mtx_unlock(&pv_chunks_mutex);
 	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
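
For the next_chunk hunk specifically, the following self-contained
program (hypothetical node and field names, no locking) performs the
same rotation on a four-element list whose scan marker sits after the
second element, showing that the elements the scan never reached end up
leading the list for the next pass.

#include <sys/queue.h>
#include <stdio.h>

struct node {
	TAILQ_ENTRY(node) link;
	char name;		/* 0 identifies a marker */
};

int
main(void)
{
	TAILQ_HEAD(, node) list = TAILQ_HEAD_INITIALIZER(list);
	struct node n[] = {
		{ .name = 'A' }, { .name = 'B' },
		{ .name = 'C' }, { .name = 'D' }
	};
	struct node marker = { .name = 0 }, marker_end = { .name = 0 };
	struct node *p;
	int i;

	for (i = 0; i < 4; i++)
		TAILQ_INSERT_TAIL(&list, &n[i], link);

	/* Pretend the scan examined A and B and now stands after B. */
	TAILQ_INSERT_AFTER(&list, &n[1], &marker, link);
	TAILQ_INSERT_TAIL(&list, &marker_end, link);

	/* The rotation loop from next_chunk. */
	while ((p = TAILQ_FIRST(&list)) != &marker) {
		TAILQ_REMOVE(&list, p, link);
		TAILQ_INSERT_TAIL(&list, p, link);
	}

	/* Prints the non-marker elements in their new order: C D A B. */
	TAILQ_FOREACH(p, &list, link)
		if (p->name != 0)
			printf("%c ", p->name);
	printf("\n");
	return (0);
}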


