Date: Sun, 1 Jan 2012 16:05:36 +0000 (UTC)
From: Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject: svn commit: r229171 - in stable/9/sys/powerpc: aim include
Message-ID: <201201011605.q01G5aMn019838@svn.freebsd.org>
Author: nwhitehorn
Date: Sun Jan 1 16:05:36 2012
New Revision: 229171
URL: http://svn.freebsd.org/changeset/base/229171

Log:
  Keep track of PVO entries in each pmap, which allows much faster
  pmap_remove() for large sparse requests. This can prevent pmap_remove()
  operations on 64-bit process destruction or swapout that would take
  several hundred times the lifetime of the universe to complete. This
  behavior is largely indistinguishable from a hang.

Modified:
  stable/9/sys/powerpc/aim/mmu_oea.c
  stable/9/sys/powerpc/aim/mmu_oea64.c
  stable/9/sys/powerpc/include/pmap.h
Directory Properties:
  stable/9/sys/   (props changed)

Modified: stable/9/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea.c	Sun Jan 1 16:04:02 2012	(r229170)
+++ stable/9/sys/powerpc/aim/mmu_oea.c	Sun Jan 1 16:05:36 2012	(r229171)
@@ -824,6 +824,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
 	for (i = 0; i < 16; i++)
 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
 	CPU_FILL(&kernel_pmap->pm_active);
+	LIST_INIT(&kernel_pmap->pmap_pvo);
 
 	/*
 	 * Set up the Open Firmware mappings
@@ -1582,6 +1583,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS,
 	    ("moea_pinit: virt pmap"));
 	PMAP_LOCK_INIT(pmap);
+	LIST_INIT(&pmap->pmap_pvo);
 
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
@@ -1763,10 +1765,17 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_off
 
 	vm_page_lock_queues();
 	PMAP_LOCK(pm);
-	for (; sva < eva; sva += PAGE_SIZE) {
-		pvo = moea_pvo_find_va(pm, sva, &pteidx);
-		if (pvo != NULL) {
-			moea_pvo_remove(pvo, pteidx);
+	if ((eva - sva)/PAGE_SIZE < 10) {
+		for (; sva < eva; sva += PAGE_SIZE) {
+			pvo = moea_pvo_find_va(pm, sva, &pteidx);
+			if (pvo != NULL)
+				moea_pvo_remove(pvo, pteidx);
+		}
+	} else {
+		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
+			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
+				continue;
+			moea_pvo_remove(pvo, -1);
 		}
 	}
 	PMAP_UNLOCK(pm);
@@ -1929,6 +1938,11 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zon
 	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);
 
 	/*
+	 * Add to pmap list
+	 */
+	LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
+
+	/*
 	 * Remember if the list was empty and therefore will be the first
 	 * item.
 	 */
@@ -1994,9 +2008,10 @@ moea_pvo_remove(struct pvo_entry *pvo, i
 	}
 
 	/*
-	 * Remove this PVO from the PV list.
+	 * Remove this PVO from the PV and pmap lists.
 	 */
 	LIST_REMOVE(pvo, pvo_vlink);
+	LIST_REMOVE(pvo, pvo_plink);
 
 	/*
 	 * Remove this from the overflow list and return it to the pool

Modified: stable/9/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea64.c	Sun Jan 1 16:04:02 2012	(r229170)
+++ stable/9/sys/powerpc/aim/mmu_oea64.c	Sun Jan 1 16:05:36 2012	(r229171)
@@ -831,6 +831,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offs
 
 	kernel_pmap->pmap_phys = kernel_pmap;
 	CPU_FILL(&kernel_pmap->pm_active);
+	LIST_INIT(&kernel_pmap->pmap_pvo);
 
 	PMAP_LOCK_INIT(kernel_pmap);
 
@@ -1855,6 +1856,7 @@ void
 moea64_pinit(mmu_t mmu, pmap_t pmap)
 {
 	PMAP_LOCK_INIT(pmap);
+	LIST_INIT(&pmap->pmap_pvo);
 
 	pmap->pm_slb_tree_root = slb_alloc_tree();
 	pmap->pm_slb = slb_alloc_user_cache();
@@ -1868,6 +1870,7 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
 	uint32_t hash;
 
 	PMAP_LOCK_INIT(pmap);
+	LIST_INIT(&pmap->pmap_pvo);
 
 	if (pmap_bootstrapped)
 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
@@ -2034,10 +2037,18 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
 
 	vm_page_lock_queues();
 	PMAP_LOCK(pm);
-	for (; sva < eva; sva += PAGE_SIZE) {
-		pvo = moea64_pvo_find_va(pm, sva);
-		if (pvo != NULL)
+	if ((eva - sva)/PAGE_SIZE < 10) {
+		for (; sva < eva; sva += PAGE_SIZE) {
+			pvo = moea64_pvo_find_va(pm, sva);
+			if (pvo != NULL)
+				moea64_pvo_remove(mmu, pvo);
+		}
+	} else {
+		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
+			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
+				continue;
 			moea64_pvo_remove(mmu, pvo);
+		}
 	}
 	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
@@ -2231,6 +2242,11 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 	    (uint64_t)(pa) | pte_lo, flags);
 
 	/*
+	 * Add to pmap list
+	 */
+	LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
+
+	/*
 	 * Remember if the list was empty and therefore will be the first
 	 * item.
 	 */
@@ -2311,9 +2327,10 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
 	}
 
 	/*
-	 * Remove this PVO from the PV list.
+	 * Remove this PVO from the PV and pmap lists.
 	 */
 	LIST_REMOVE(pvo, pvo_vlink);
+	LIST_REMOVE(pvo, pvo_plink);
 
 	/*
 	 * Remove this from the overflow list and return it to the pool

Modified: stable/9/sys/powerpc/include/pmap.h
==============================================================================
--- stable/9/sys/powerpc/include/pmap.h	Sun Jan 1 16:04:02 2012	(r229170)
+++ stable/9/sys/powerpc/include/pmap.h	Sun Jan 1 16:05:36 2012	(r229171)
@@ -88,28 +88,13 @@ struct pmap_md {
 #endif /* !defined(NPMAPS) */
 
 struct slbtnode;
-
-struct pmap {
-	struct	mtx	pm_mtx;
-
-	#ifdef __powerpc64__
-	struct slbtnode *pm_slb_tree_root;
-	struct slb	**pm_slb;
-	int		pm_slb_len;
-	#else
-	register_t	pm_sr[16];
-	#endif
-	cpuset_t	pm_active;
-
-	struct pmap	*pmap_phys;
-	struct	pmap_statistics	pm_stats;
-};
-
+struct pmap;
 typedef struct pmap *pmap_t;
 
 struct pvo_entry {
 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
 	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
+	LIST_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
 	union {
 		struct	pte pte;		/* 32 bit PTE */
 		struct	lpte lpte;		/* 64 bit PTE */
@@ -137,6 +122,23 @@ LIST_HEAD(pvo_head, pvo_entry);
 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 #define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
 
+struct pmap {
+	struct	mtx	pm_mtx;
+
+	#ifdef __powerpc64__
+	struct slbtnode *pm_slb_tree_root;
+	struct slb	**pm_slb;
+	int		pm_slb_len;
+	#else
+	register_t	pm_sr[16];
+	#endif
+	cpuset_t	pm_active;
+
+	struct pmap	*pmap_phys;
+	struct	pmap_statistics	pm_stats;
+	struct pvo_head pmap_pvo;
+};
+
 struct	md_page {
 	u_int64_t mdpg_attrs;
 	vm_memattr_t mdpg_cache_attrs;
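
For readers who want the idea without the pmap details: the sketch below is a
minimal, stand-alone restatement in plain C of what the diff does, written
against the queue(3) LIST macros. Every address space keeps a list of its own
mapping entries (the role played by the new pmap_pvo head and pvo_plink link
above), so a removal over a large, sparsely populated range can walk that
list once instead of probing every page-sized step of the range. All names
here (struct vspace, struct mapping, vspace_remove(), and the 10-page
threshold, which is simply copied from the diff) are hypothetical stand-ins,
not the FreeBSD code.

	/*
	 * Hypothetical illustration only; not the FreeBSD pmap code.
	 */
	#include <sys/queue.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#ifndef PAGE_SIZE
	#define	PAGE_SIZE	4096
	#endif

	struct mapping {
		uint64_t va;			/* mapped virtual address */
		LIST_ENTRY(mapping) m_link;	/* link into the owning space */
	};

	struct vspace {
		LIST_HEAD(, mapping) vs_mappings; /* all mappings in this space */
	};

	/* Stand-in for the per-VA lookup (the moea*_pvo_find_va() role). */
	static struct mapping *
	vspace_find(struct vspace *vs, uint64_t va)
	{
		struct mapping *m;

		LIST_FOREACH(m, &vs->vs_mappings, m_link)
			if (m->va == va)
				return (m);
		return (NULL);
	}

	static void
	vspace_enter(struct vspace *vs, uint64_t va)
	{
		struct mapping *m;

		if ((m = malloc(sizeof(*m))) == NULL)
			abort();
		m->va = va;
		LIST_INSERT_HEAD(&vs->vs_mappings, m, m_link);
	}

	static void
	vspace_drop(struct mapping *m)
	{
		LIST_REMOVE(m, m_link);
		free(m);
	}

	/*
	 * Remove every mapping in [sva, eva).  Small ranges are probed page
	 * by page; large ranges walk the per-space list once, so the cost is
	 * bounded by the number of mappings that exist, not by the size of
	 * the range.
	 */
	static void
	vspace_remove(struct vspace *vs, uint64_t sva, uint64_t eva)
	{
		struct mapping *m, *next;

		if ((eva - sva) / PAGE_SIZE < 10) {
			for (; sva < eva; sva += PAGE_SIZE)
				if ((m = vspace_find(vs, sva)) != NULL)
					vspace_drop(m);
		} else {
			for (m = LIST_FIRST(&vs->vs_mappings); m != NULL; m = next) {
				next = LIST_NEXT(m, m_link);
				if (m->va >= sva && m->va < eva)
					vspace_drop(m);
			}
		}
	}

	int
	main(void)
	{
		struct vspace vs;

		LIST_INIT(&vs.vs_mappings);
		vspace_enter(&vs, 0x10000);
		vspace_enter(&vs, 0x7fffffff0000ULL);	/* sparse 64-bit space */
		vspace_remove(&vs, 0, 0x800000000000ULL); /* huge range, 2 entries */
		printf("mappings left: %s\n",
		    LIST_EMPTY(&vs.vs_mappings) ? "none" : "some");
		return (0);
	}

The small-range branch is kept so that the common case (unmapping a handful
of pages) still goes through the cheap per-VA lookup; only genuinely large
requests pay for the full list walk.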