Date: Sat, 7 Jul 2018 17:58:21 +0000 (UTC)
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r336071 - in stable/11/sys: amd64/amd64 arm64/arm64
Message-ID: <201807071758.w67HwLIB067726@repo.freebsd.org>
Author: markj
Date: Sat Jul 7 17:58:20 2018
New Revision: 336071
URL: https://svnweb.freebsd.org/changeset/base/336071

Log:
  MFC r335580:
  Re-count available PV entries after reclaiming a PV chunk.

Modified:
  stable/11/sys/amd64/amd64/pmap.c
  stable/11/sys/arm64/arm64/pmap.c

Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c    Sat Jul 7 17:25:09 2018    (r336070)
+++ stable/11/sys/amd64/amd64/pmap.c    Sat Jul 7 17:58:20 2018    (r336071)
@@ -3483,8 +3483,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwl
 {
         struct pch new_tail;
         struct pv_chunk *pc;
-        int avail, free;
         vm_page_t m;
+        int avail, free;
+        bool reclaimed;
 
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -3512,13 +3513,14 @@ retry:
                 if (avail >= needed)
                         break;
         }
-        for (; avail < needed; avail += _NPCPV) {
+        for (reclaimed = false; avail < needed; avail += _NPCPV) {
                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
                     VM_ALLOC_WIRED);
                 if (m == NULL) {
                         m = reclaim_pv_chunk(pmap, lockp);
                         if (m == NULL)
                                 goto retry;
+                        reclaimed = true;
                 }
                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -3531,6 +3533,14 @@ retry:
                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+                /*
+                 * The reclaim might have freed a chunk from the current pmap.
+                 * If that chunk contained available entries, we need to
+                 * re-count the number of available entries.
+                 */
+                if (reclaimed)
+                        goto retry;
         }
         if (!TAILQ_EMPTY(&new_tail)) {
                 mtx_lock(&pv_chunks_mutex);

Modified: stable/11/sys/arm64/arm64/pmap.c
==============================================================================
--- stable/11/sys/arm64/arm64/pmap.c    Sat Jul 7 17:25:09 2018    (r336070)
+++ stable/11/sys/arm64/arm64/pmap.c    Sat Jul 7 17:58:20 2018    (r336071)
@@ -2108,8 +2108,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwl
 {
         struct pch new_tail;
         struct pv_chunk *pc;
-        int avail, free;
         vm_page_t m;
+        int avail, free;
+        bool reclaimed;
 
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -2132,13 +2133,14 @@ retry:
                 if (avail >= needed)
                         break;
         }
-        for (; avail < needed; avail += _NPCPV) {
+        for (reclaimed = false; avail < needed; avail += _NPCPV) {
                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
                     VM_ALLOC_WIRED);
                 if (m == NULL) {
                         m = reclaim_pv_chunk(pmap, lockp);
                         if (m == NULL)
                                 goto retry;
+                        reclaimed = true;
                 }
                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -2151,6 +2153,14 @@ retry:
                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+                /*
+                 * The reclaim might have freed a chunk from the current pmap.
+                 * If that chunk contained available entries, we need to
+                 * re-count the number of available entries.
+                 */
+                if (reclaimed)
+                        goto retry;
         }
         if (!TAILQ_EMPTY(&new_tail)) {
                 mtx_lock(&pv_chunks_mutex);
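For readers who want the gist of the fix outside the pmap code: the sketch below is a minimal, standalone C model (hypothetical names and grossly simplified accounting; not the kernel code). It exercises the worst case the new comment describes, where reclaim takes a chunk away from the very pool whose free entries were just counted, so the running "avail" total computed before the reclaim overstates what is really available and the only safe response is to jump back to the label and count again.

/*
 * Standalone sketch of the "re-count after reclaim" pattern.
 * Hypothetical names; simplified stand-in for reserve_pv_entries().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_CHUNK 168   /* stands in for _NPCPV */

static int chunks_owned = 2;    /* chunks "this pmap" already holds */

/* Count entries available in the chunks we already own. */
static int
count_available(void)
{

        return (chunks_owned * ENTRIES_PER_CHUNK);
}

/* Fail once to force the reclaim path, then succeed. */
static void *
alloc_chunk(void)
{
        static int failures = 1;

        if (failures-- > 0)
                return (NULL);
        return (malloc(1));
}

/* The reclaim may take a chunk back from the current owner. */
static void *
reclaim_chunk(void)
{

        if (chunks_owned > 0)
                chunks_owned--;
        return (malloc(1));
}

static void
reserve_entries(int needed)
{
        bool reclaimed;
        void *m;
        int avail;

retry:
        avail = count_available();
        for (reclaimed = false; avail < needed; avail += ENTRIES_PER_CHUNK) {
                if ((m = alloc_chunk()) == NULL) {
                        m = reclaim_chunk();
                        reclaimed = true;
                }
                chunks_owned++;         /* "link" the new chunk into our list */
                free(m);
                /*
                 * The reclaim may have freed a chunk that was already
                 * counted above, so the running total can overstate what
                 * is available; re-count instead of trusting it.
                 */
                if (reclaimed)
                        goto retry;
        }
        printf("reserved: need %d, have %d\n", needed, avail);
}

int
main(void)
{

        reserve_entries(400);
        return (0);
}

In the kernel the reclaimed chunk can also come from another pmap, in which case the pre-reclaim count stays valid; the fix simply refuses to rely on that and re-counts whenever a reclaim happened.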