Date: Wed, 31 Jul 2019 05:38:40 +0000 (UTC)
From: Alan Cox <alc@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r350463 - in head/sys: amd64/amd64 arm64/arm64 i386/i386
Message-ID: <201907310538.x6V5ce3E066066@repo.freebsd.org>
Author: alc
Date: Wed Jul 31 05:38:39 2019
New Revision: 350463
URL: https://svnweb.freebsd.org/changeset/base/350463

Log:
  In pmap_advise(), when we encounter a superpage mapping, we first demote
  the mapping and then destroy one of the 4 KB page mappings so that there
  is a potential trigger for repromotion.  Currently, we destroy the first
  4 KB page mapping that falls within the (current) superpage mapping or
  the virtual address range [sva, eva).  However, I have found empirically
  that destroying the last 4 KB mapping produces slightly better results,
  specifically, more promotions and fewer failed promotion attempts.
  Accordingly, this revision changes pmap_advise() to destroy the last
  4 KB page mapping.  It also replaces some nearby uses of boolean_t
  with bool.

  Reviewed by:	kib, markj
  Differential Revision:	https://reviews.freebsd.org/D21115

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm64/arm64/pmap.c
  head/sys/i386/i386/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Wed Jul 31 04:19:53 2019	(r350462)
+++ head/sys/amd64/amd64/pmap.c	Wed Jul 31 05:38:39 2019	(r350463)
@@ -7444,7 +7444,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t
 	pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
 	vm_offset_t va, va_next;
 	vm_page_t m;
-	boolean_t anychanged;
+	bool anychanged;
 
 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
 		return;
@@ -7463,7 +7463,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t
 	PG_M = pmap_modified_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
 	PG_RW = pmap_rw_bit(pmap);
-	anychanged = FALSE;
+	anychanged = false;
 	pmap_delayed_invl_start();
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
@@ -7505,17 +7505,25 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(va_next, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldpde & PG_W) == 0) {
-				pte = pmap_pde_to_pte(pde, sva);
+				va = eva;
+				if (va > va_next)
+					va = va_next;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				pte = pmap_pde_to_pte(pde, va);
 				KASSERT((*pte & PG_V) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_pte(pmap, pte, sva, *pde, NULL,
+				pmap_remove_pte(pmap, pte, va, *pde, NULL,
 				    &lock);
-				anychanged = TRUE;
+				anychanged = true;
 			}
 			if (lock != NULL)
 				rw_wunlock(lock);
@@ -7547,7 +7555,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t
 				if (va == va_next)
 					va = sva;
 			} else
-				anychanged = TRUE;
+				anychanged = true;
 			continue;
 maybe_invlrng:
 			if (va != va_next) {

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Wed Jul 31 04:19:53 2019	(r350462)
+++ head/sys/arm64/arm64/pmap.c	Wed Jul 31 05:38:39 2019	(r350463)
@@ -4888,15 +4888,23 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(va_next, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
-				l3 = pmap_l2_to_l3(l2, sva);
+				va = eva;
+				if (va > va_next)
+					va = va_next;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				l3 = pmap_l2_to_l3(l2, va);
 				KASSERT(pmap_load(l3) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_l3(pmap, l3, sva, pmap_load(l2),
+				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
 				    NULL, &lock);
 			}
 			if (lock != NULL)

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Wed Jul 31 04:19:53 2019	(r350462)
+++ head/sys/i386/i386/pmap.c	Wed Jul 31 05:38:39 2019	(r350463)
@@ -5167,19 +5167,19 @@ __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva,
 	pt_entry_t *pte;
 	vm_offset_t va, pdnxt;
 	vm_page_t m;
-	boolean_t anychanged, pv_lists_locked;
+	bool anychanged, pv_lists_locked;
 
 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
 		return;
 	if (pmap_is_current(pmap))
-		pv_lists_locked = FALSE;
+		pv_lists_locked = false;
 	else {
-		pv_lists_locked = TRUE;
+		pv_lists_locked = true;
 resume:
 		rw_wlock(&pvh_global_lock);
 		sched_pin();
 	}
-	anychanged = FALSE;
+	anychanged = false;
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = pdnxt) {
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
@@ -5193,7 +5193,7 @@ resume:
 			if ((oldpde & PG_MANAGED) == 0)
 				continue;
 			if (!pv_lists_locked) {
-				pv_lists_locked = TRUE;
+				pv_lists_locked = true;
 				if (!rw_try_wlock(&pvh_global_lock)) {
 					if (anychanged)
 						pmap_invalidate_all_int(pmap);
@@ -5212,16 +5212,24 @@ resume:
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(pdnxt, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldpde & PG_W) == 0) {
-				pte = pmap_pte_quick(pmap, sva);
+				va = eva;
+				if (va > pdnxt)
+					va = pdnxt;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				pte = pmap_pte_quick(pmap, va);
 				KASSERT((*pte & PG_V) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_pte(pmap, pte, sva, NULL);
-				anychanged = TRUE;
+				pmap_remove_pte(pmap, pte, va, NULL);
+				anychanged = true;
 			}
 		}
 		if (pdnxt > eva)
@@ -5250,7 +5258,7 @@ resume:
 			if (va == pdnxt)
 				va = sva;
 		} else
-			anychanged = TRUE;
+			anychanged = true;
 		continue;
 maybe_invlrng:
 		if (va != pdnxt) {
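For readers who want to try the selection logic outside the kernel, below is a minimal userland sketch, not part of the commit, of the address computation that each hunk above inserts before the PTE lookup: clip the end of the advised range to the end of the current superpage, then step back one page. The name last_page_in_range, the userland assert()/printf(), and the example addresses are illustrative assumptions; the kernel code uses KASSERT() and the pmap-specific helpers shown in the diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* 4 KB base page on amd64/arm64/i386 */
#define NBPDR		(2UL * 1024 * 1024)	/* 2 MB superpage, amd64 example */

typedef uintptr_t vm_offset_t;

/*
 * Return the start of the last 4 KB page within [sva, min(va_next, eva)).
 */
static vm_offset_t
last_page_in_range(vm_offset_t sva, vm_offset_t va_next, vm_offset_t eva)
{
	vm_offset_t va;

	va = eva;
	if (va > va_next)
		va = va_next;	/* clip to the end of the current superpage */
	va -= PAGE_SIZE;	/* step back to the last 4 KB page */
	assert(va >= sva);	/* the clipped range holds at least one page */
	return (va);
}

int
main(void)
{
	vm_offset_t sva = 0x200000, va_next = sva + NBPDR;

	/* Advised range ends past the superpage: pick its final 4 KB page. */
	printf("%#lx\n", (unsigned long)last_page_in_range(sva, va_next,
	    0x1000000));
	/* Advised range ends inside the superpage: pick the page before eva. */
	printf("%#lx\n", (unsigned long)last_page_in_range(sva, va_next,
	    sva + 16 * PAGE_SIZE));
	return (0);
}

With the values above this prints 0x3ff000 and 0x20f000, i.e. the last 4 KB page of the superpage in the first case and the page immediately before eva in the second.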