Date: Sat, 8 Sep 2012 17:43:20 +0000 (UTC)
From: Alan Cox <alc@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r240241 - head/sys/mips/mips
Message-ID: <201209081743.q88HhKBw029744@svn.freebsd.org>
Author: alc
Date: Sat Sep 8 17:43:20 2012
New Revision: 240241
URL: http://svn.freebsd.org/changeset/base/240241

Log:
  pmap_remove: Avoid re-walking the page table from the root for every
  PTE examined.  Tidy up some of pmap_remove()'s helper functions.

  pmap_enter: Set PV_TABLE_REF whenever the physical page being mapped
  is managed, not just when it is writeable.  Only call pmap_update_page()
  when the old mapping was valid.  If there was no prior valid mapping,
  then pmap_update_page() serves no useful purpose.  However, it will
  interrupt every processor that has the pmap active.

  pmap_enter_quick_locked: Always set PTE_RO.

  pmap_emulate_modified: Don't set PV_TABLE_REF.  Eliminate a nonsensical
  comment.

Modified:
  head/sys/mips/mips/pmap.c

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Sat Sep 8 16:47:52 2012	(r240240)
+++ head/sys/mips/mips/pmap.c	Sat Sep 8 17:43:20 2012	(r240241)
@@ -1677,6 +1677,11 @@ pmap_remove_pte(struct pmap *pmap, pt_en
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
+	/*
+	 * Write back all cache lines from the page being unmapped.
+	 */
+	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
+
 	oldpte = *ptq;
 	if (is_kernel_pmap(pmap))
 		*ptq = PTE_G;
@@ -1723,24 +1728,13 @@ pmap_remove_page(struct pmap *pmap, vm_o
 	ptq = pmap_pde_to_pte(pde, va);
 
 	/*
-	 * if there is no pte for this address, just skip it!!!
+	 * If there is no pte for this address, just skip it!
 	 */
-	if (!pte_test(ptq, PTE_V)) {
+	if (!pte_test(ptq, PTE_V))
 		return;
-	}
 
-	/*
-	 * Write back all caches from the page being destroyed
-	 */
-	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
-
-	/*
-	 * get a local va for mappings for this pmap.
-	 */
 	(void)pmap_remove_pte(pmap, ptq, va, *pde);
 	pmap_invalidate_page(pmap, va);
-
-	return;
 }
 
 /*
@@ -1789,9 +1783,12 @@ pmap_remove(struct pmap *pmap, vm_offset
 			continue;
 		if (va_next > eva)
 			va_next = eva;
-		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
-		    pte++, sva += PAGE_SIZE) {
-			pmap_remove_page(pmap, sva);
+		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+		    sva += PAGE_SIZE) {
+			if (!pte_test(pte, PTE_V))
+				continue;
+			pmap_remove_pte(pmap, pte, sva, *pde);
+			pmap_invalidate_page(pmap, sva);
 		}
 	}
 out:
@@ -2035,6 +2032,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 			mpte->wire_count--;
 
 		if (pte_test(&origpte, PTE_MANAGED)) {
+			m->md.pv_flags |= PV_TABLE_REF;
 			om = m;
 			newpte |= PTE_MANAGED;
 			if (!pte_test(&newpte, PTE_RO))
@@ -2070,6 +2068,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	 * Enter on the PV list if part of our managed memory.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		m->md.pv_flags |= PV_TABLE_REF;
 		if (pv == NULL)
 			pv = get_pv_entry(pmap, FALSE);
 		pv->pv_va = va;
@@ -2087,8 +2086,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 		pmap->pm_stats.wired_count++;
 
 validate:
-	if ((access & VM_PROT_WRITE) != 0)
-		m->md.pv_flags |= PV_TABLE_REF;
 
 #ifdef PMAP_DEBUG
 	printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
@@ -2099,8 +2096,8 @@ validate:
 	 * update the pte.
 	 */
 	if (origpte != newpte) {
+		*pte = newpte;
 		if (pte_test(&origpte, PTE_V)) {
-			*pte = newpte;
 			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
 				if (om->md.pv_flags & PV_TABLE_REF)
 					vm_page_aflag_set(om, PGA_REFERENCED);
@@ -2116,11 +2113,9 @@ validate:
 			if (pte_test(&origpte, PTE_MANAGED) &&
 			    TAILQ_EMPTY(&om->md.pv_list))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
-		} else {
-			*pte = newpte;
+			pmap_update_page(pmap, va, newpte);
 		}
 	}
-	pmap_update_page(pmap, va, newpte);
 
 	/*
 	 * Sync I & D caches for executable pages. Do this only if the
@@ -2239,7 +2234,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
+	*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		*pte |= PTE_MANAGED;
 
@@ -2251,7 +2246,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	if (is_kernel_pmap(pmap))
 		*pte |= PTE_G;
 	else {
-		*pte |= PTE_RO;
 		/*
 		 * Sync I & D caches. Do this only if the target pmap
 		 * belongs to the current process. Otherwise, an
@@ -3255,9 +3249,7 @@ init_pte_prot(vm_page_t m, vm_prot_t acc
 int
 pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
 {
-	vm_page_t m;
 	pt_entry_t *pte;
-	vm_paddr_t pa;
 
 	PMAP_LOCK(pmap);
 	pte = pmap_pte(pmap, va);
@@ -3275,7 +3267,6 @@ pmap_emulate_modified(pmap_t pmap, vm_of
 		panic("pmap_emulate_modified: invalid pte");
 #endif
 	if (pte_test(pte, PTE_RO)) {
-		/* write to read only page in the kernel */
 		PMAP_UNLOCK(pmap);
 		return (1);
 	}
@@ -3283,9 +3274,6 @@ pmap_emulate_modified(pmap_t pmap, vm_of
 	tlb_update(pmap, va, *pte);
 	if (!pte_test(pte, PTE_MANAGED))
 		panic("pmap_emulate_modified: unmanaged page");
-	pa = TLBLO_PTE_TO_PA(*pte);
-	m = PHYS_TO_VM_PAGE(pa);
-	m->md.pv_flags |= PV_TABLE_REF;
 	PMAP_UNLOCK(pmap);
 	return (0);
 }
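For readers following along, the pmap_remove() change amounts to hoisting
the root-to-leaf page-table walk out of the per-page loop: the PDE is
resolved once per page-table page, and a PTE pointer is then stepped
linearly across the segment. Below is a minimal standalone sketch of that
loop shape. It is plain userspace C against a made-up two-level table;
pt_root, SEG_SHIFT, remove_range, and friends are hypothetical stand-ins,
not the real MIPS pmap structures, and it assumes page-aligned addresses
below NSEG * SEG_SIZE.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	((uintptr_t)1 << PAGE_SHIFT)
#define NPTEPG		512			/* PTEs per page-table page */
#define SEG_SHIFT	(PAGE_SHIFT + 9)	/* 9 == log2(NPTEPG) */
#define SEG_SIZE	((uintptr_t)1 << SEG_SHIFT)
#define NSEG		512

typedef uint64_t pt_entry_t;
#define PTE_V		0x1

/* Hypothetical two-level table: one PTE page per segment, NULL if absent. */
static pt_entry_t *pt_root[NSEG];

/* Remove every valid mapping in [sva, eva) with a single linear walk. */
static void
remove_range(uintptr_t sva, uintptr_t eva)
{
	pt_entry_t *pte;
	uintptr_t va_next;

	while (sva < eva) {
		/* Start of the next segment; clamp to eva, guard wrap. */
		va_next = (sva + SEG_SIZE) & ~(SEG_SIZE - 1);
		if (va_next < sva || va_next > eva)
			va_next = eva;

		/* One root lookup per segment, not one per page. */
		pte = pt_root[sva >> SEG_SHIFT];
		if (pte == NULL) {		/* whole segment unmapped */
			sva = va_next;
			continue;
		}
		pte += (sva >> PAGE_SHIFT) & (NPTEPG - 1);
		for (; sva != va_next; pte++, sva += PAGE_SIZE) {
			if ((*pte & PTE_V) == 0)
				continue;
			/* Stand-in for pmap_remove_pte() +
			 * pmap_invalidate_page(). */
			*pte = 0;
		}
	}
}

int
main(void)
{
	static pt_entry_t ptes[NPTEPG];	/* one PTE page, for segment 0 */

	pt_root[0] = ptes;
	ptes[3] = ((pt_entry_t)0x1234 << PAGE_SHIFT) | PTE_V;
	remove_range(0, SEG_SIZE);
	return (ptes[3] != 0);		/* exits 0 if the mapping is gone */
}

The "if (!pte_test(pte, PTE_V)) continue;" test from the real diff appears
here as the (*pte & PTE_V) check: the cost of an unmapped page is now a
single load rather than a full walk from the root.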
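The pmap_update_page() change rests on an invariant worth spelling out: a
PTE that was never valid cannot be cached in any TLB, so replacing it
needs no invalidation and no interprocessor interrupts. Here is a
self-contained sketch of that rule; update_tlb() and pte_store() are
hypothetical stand-ins (the real kernel routine is pmap_update_page(),
which interrupts every processor that has the pmap active).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;
#define PTE_V	0x1

/* Stand-in for pmap_update_page(); just counts the expensive calls. */
static int tlb_updates;

static void
update_tlb(uintptr_t va, pt_entry_t newpte)
{
	(void)va;
	(void)newpte;
	tlb_updates++;
}

static void
pte_store(pt_entry_t *pte, uintptr_t va, pt_entry_t newpte)
{
	pt_entry_t origpte = *pte;

	if (origpte == newpte)
		return;			/* nothing changed */
	*pte = newpte;
	/* Only a previously valid PTE can be cached in any TLB. */
	if (origpte & PTE_V)
		update_tlb(va, newpte);
}

int
main(void)
{
	pt_entry_t pte = 0;

	pte_store(&pte, 0x1000, 0xabc000 | PTE_V);	/* fresh mapping */
	pte_store(&pte, 0x1000, 0xdef000 | PTE_V);	/* replacement */
	printf("TLB updates: %d (expected 1)\n", tlb_updates);
	return (0);
}

Running this performs two PTE stores but only one TLB update: the first
store creates a brand-new mapping, mirroring the pmap_enter() path that
now skips pmap_update_page() entirely.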