Date:      Mon, 14 Aug 2017 11:06:09 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r322492 - stable/11/sys/amd64/amd64
Message-ID:  <201708141106.v7EB6915015310@repo.freebsd.org>

Author: kib
Date: Mon Aug 14 11:06:09 2017
New Revision: 322492
URL: https://svnweb.freebsd.org/changeset/base/322492

Log:
  MFC r322175:
  Avoid DI recursion when reclaim_pv_chunk() is called from
  pmap_advise() or pmap_remove().
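
The idea behind the change, sketched as a minimal standalone C program:
reclaim_pv_chunk() now starts and finishes a Delayed Invalidation (DI) block
only when the calling thread is not already inside one, instead of
unconditionally opening a second, nested block.  The names below (di_gen,
not_in_di(), start_di(), finish_di(), reclaim()) are simplified stand-ins for
curthread->td_md.md_invl_gen.gen, pmap_not_in_di(), pmap_delayed_invl_started(),
pmap_delayed_invl_finished() and reclaim_pv_chunk(); this illustrates the
pattern only and is not the pmap code itself.

#include <stdbool.h>
#include <stdio.h>

static unsigned long di_gen;	/* 0 means "not inside a DI block" */

static bool
not_in_di(void)
{
	return (di_gen == 0);
}

static void
start_di(void)
{
	di_gen = 1;
}

static void
finish_di(void)
{
	di_gen = 0;
}

/*
 * Model of reclaim_pv_chunk(): callers such as pmap_advise() or
 * pmap_remove() may already hold a DI block when this runs, so only
 * enter and leave one here if the caller has not.
 */
static void
reclaim(void)
{
	bool start_di_here = not_in_di();

	if (start_di_here)
		start_di();
	/* ... free pv chunks, invalidate stale TLB entries ... */
	if (start_di_here)
		finish_di();
}

int
main(void)
{
	reclaim();	/* standalone call: opens and closes its own DI block */

	start_di();	/* caller-held DI block, as in pmap_remove() */
	reclaim();	/* nested call: must not start a second block */
	finish_di();

	printf("in DI block at exit: %s\n", not_in_di() ? "no" : "yes");
	return (0);
}

Because the guard is computed once up front, the same conditional start/finish
also applies at each point where the function switches to a different pmap,
which is what the new reclaim_pv_chunk_leave_pmap() helper in the diff handles.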

Modified:
  stable/11/sys/amd64/amd64/pmap.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c	Mon Aug 14 11:04:06 2017	(r322491)
+++ stable/11/sys/amd64/amd64/pmap.c	Mon Aug 14 11:06:09 2017	(r322492)
@@ -430,8 +430,15 @@ static struct lock_object invl_gen_ts = {
 	.lo_name = "invlts",
 };
 
+static bool
+pmap_not_in_di(void)
+{
+
+	return (curthread->td_md.md_invl_gen.gen == 0);
+}
+
 #define	PMAP_ASSERT_NOT_IN_DI() \
-    KASSERT(curthread->td_md.md_invl_gen.gen == 0, ("DI already started"))
+    KASSERT(pmap_not_in_di(), ("DI already started"))
 
 /*
  * Start a new Delayed Invalidation (DI) block of code, executed by
@@ -2847,6 +2854,19 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG
 	"Current number of spare pv entries");
 #endif
 
+static void
+reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
+{
+
+	if (pmap == NULL)
+		return;
+	pmap_invalidate_all(pmap);
+	if (pmap != locked_pmap)
+		PMAP_UNLOCK(pmap);
+	if (start_di)
+		pmap_delayed_invl_finished();
+}
+
 /*
  * We are in a serious low memory condition.  Resort to
  * drastic measures to free some pages so we can allocate
@@ -2874,6 +2894,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	struct spglist free;
 	uint64_t inuse;
 	int bit, field, freed;
+	bool start_di;
 
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
@@ -2882,19 +2903,21 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	PG_G = PG_A = PG_M = PG_RW = 0;
 	SLIST_INIT(&free);
 	TAILQ_INIT(&new_tail);
-	pmap_delayed_invl_started();
+
+	/*
+	 * A delayed invalidation block should already be active if
+	 * pmap_advise() or pmap_remove() called this function by way
+	 * of pmap_demote_pde_locked().
+	 */
+	start_di = pmap_not_in_di();
+
 	mtx_lock(&pv_chunks_mutex);
 	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
 		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 		mtx_unlock(&pv_chunks_mutex);
 		if (pmap != pc->pc_pmap) {
-			if (pmap != NULL) {
-				pmap_invalidate_all(pmap);
-				if (pmap != locked_pmap)
-					PMAP_UNLOCK(pmap);
-			}
-			pmap_delayed_invl_finished();
-			pmap_delayed_invl_started();
+			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
+			    start_di);
 			pmap = pc->pc_pmap;
 			/* Avoid deadlock and lock recursion. */
 			if (pmap > locked_pmap) {
@@ -2911,6 +2934,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 			PG_A = pmap_accessed_bit(pmap);
 			PG_M = pmap_modified_bit(pmap);
 			PG_RW = pmap_rw_bit(pmap);
+			if (start_di)
+				pmap_delayed_invl_started();
 		}
 
 		/*
@@ -2985,12 +3010,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
 	}
 	TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
 	mtx_unlock(&pv_chunks_mutex);
-	if (pmap != NULL) {
-		pmap_invalidate_all(pmap);
-		if (pmap != locked_pmap)
-			PMAP_UNLOCK(pmap);
-	}
-	pmap_delayed_invl_finished();
+	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
 		m_pc = SLIST_FIRST(&free);
 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);


