Date: Thu, 24 Jan 2008 09:45:29 GMT
From: Rafal Jaworowski <raj@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 133992 for review
Message-ID: <200801240945.m0O9jT17017189@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=133992

Change 133992 by raj@raj_mimi on 2008/01/24 09:45:14

	Various cosmetics and cleanups.

Affected files ...

.. //depot/projects/e500/sys/powerpc/booke/pmap.c#9 edit

Differences ...

==== //depot/projects/e500/sys/powerpc/booke/pmap.c#9 (text+ko) ====

@@ -108,7 +108,7 @@
 /* Kernel physical load address. */
 extern uint32_t kernload;
 
-#define MEM_REGIONS 8
+#define MEM_REGIONS 8
 struct mem_region availmem_regions[MEM_REGIONS];
 int availmem_regions_sz;
 
@@ -143,7 +143,7 @@
  * If user pmap is processed with mmu_booke_remove and the resident count
  * drops to 0, there are no more pages to remove, so we need not continue.
  */
-#define PMAP_REMOVE_DONE(pmap) \
+#define PMAP_REMOVE_DONE(pmap) \
 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
 
 extern void load_pid0(tlbtid_t);
@@ -757,7 +757,7 @@
 
 	pte = &ptbl[ptbl_idx];
 
-	if (!PTE_ISVALID(pte))
+	if (pte == NULL || !PTE_ISVALID(pte))
 		return (0);
 
 	/* Get vm_page_t for mapped pte. */
@@ -1334,7 +1334,7 @@
 }
 
 /*
- * Initialize the pmap associated with process 0.
+ * Initialize pmap associated with process 0.
  */
 void
 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
@@ -1592,17 +1592,6 @@
 {
 	//debugf("mmu_booke_enter_quick: s\n");
 
-#if 0
-	/* XXX this is the old way - test if the new approach is really ok..? */
-	vm_page_busy(m);
-	vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(m->object);
-	mmu_booke_enter(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-	VM_OBJECT_LOCK(m->object);
-	vm_page_lock_queues();
-	vm_page_wakeup(m);
-#endif
-
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m,
 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
@@ -1811,7 +1800,7 @@
 		}
 
 		/* Referenced pages. */
-		if (PTE_ISREFERENCED(pte))
+		if (PTE_ISREFERENCED(pte))
 			vm_page_flag_set(m, PG_REFERENCED);
 
 		/* Flush mapping from TLB0. */
@@ -1824,57 +1813,10 @@
 	vm_page_unlock_queues();
 }
 
-#if 0
 /*
- * Lower the permission for all mappings to a given page.
+ * Clear the write and modified bits in each of the given page's mappings.
  */
 void
-mmu_booke_page_protect(vm_page_t m, vm_prot_t prot)
-{
-	pv_entry_t pv;
-	pte_t *pte;
-
-	if ((prot & VM_PROT_WRITE) != 0)
-		return;
-
-	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0) {
-		mmu_booke_remove_all(m);
-		return;
-	}
-
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->flags & PG_WRITEABLE) == 0)
-		return;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		PMAP_LOCK(pv->pv_pmap);
-		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL) {
-			if (PTE_ISVALID(pte)) {
-				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
-
-				/* Handle modified pages. */
-				if (PTE_ISMODIFIED(pte)) {
-					if (mmu_booke_track_modified(pv->pv_pmap, pv->pv_va))
-						vm_page_dirty(m);
-				}
-
-				/* Referenced pages. */
-				if (PTE_ISREFERENCED(pte))
-					vm_page_flag_set(m, PG_REFERENCED);
-
-				/* Flush mapping from TLB0. */
-				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | PTE_REFERENCED);
-				tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
-			}
-		}
-		PMAP_UNLOCK(pv->pv_pmap);
-	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
-}
-#endif
-
-void
 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
@@ -2101,7 +2043,7 @@
 			return (TRUE);
 		}
 	}
-	make_sure_to_unlock:
+make_sure_to_unlock:
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	return (FALSE);
@@ -2248,11 +2190,10 @@
 }
 
 /*
- * Returns true if the pmap's pv is one of the first
- * 16 pvs linked to from this page.  This count may
- * be changed upwards or downwards in the future; it
- * is only necessary that true be returned for a small
- * subset of pmaps for proper page aging.
+ * Return true if the pmap's pv is one of the first 16 pvs linked to from this
+ * page. This count may be changed upwards or downwards in the future; it is
+ * only necessary that true be returned for a small subset of pmaps for proper
+ * page aging.
  */
 boolean_t
 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
@@ -2294,9 +2235,8 @@
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
-			if (PTE_ISVALID(pte))
-				if (PTE_ISWIRED(pte))
-					count++;
+			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
+				count++;
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
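
For readers skimming the non-cosmetic hunks (the NULL guard added before PTE_ISVALID() at @@ -757 and the folded PTE_ISVALID()/PTE_ISWIRED() test at @@ -2294), here is a minimal userland sketch of the same two idioms. The pte_t layout, the PTE_VALID/PTE_WIRED bits, the macro bodies, count_wired() and the pointer table below are simplified stand-ins invented for illustration; they are not the definitions from sys/powerpc/booke/pmap.c, and the real pmap code walks a pv list rather than an array.

/*
 * Illustration only: a slot may be NULL (nothing mapped), so check for a
 * missing entry before testing any flag, and fold the "valid" and "wired"
 * tests into one condition, as the change above does.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct pte {
	uint32_t flags;
} pte_t;

#define PTE_VALID	0x00000001	/* made-up flag bits */
#define PTE_WIRED	0x00000002

#define PTE_ISVALID(p)	(((p)->flags & PTE_VALID) != 0)
#define PTE_ISWIRED(p)	(((p)->flags & PTE_WIRED) != 0)

/* Count wired mappings among a table of possibly-NULL PTE pointers. */
static int
count_wired(pte_t **tbl, size_t n)
{
	size_t i;
	int count = 0;

	for (i = 0; i < n; i++) {
		pte_t *pte = tbl[i];

		if (pte != NULL && PTE_ISVALID(pte) && PTE_ISWIRED(pte))
			count++;
	}
	return (count);
}

int
main(void)
{
	pte_t wired = { PTE_VALID | PTE_WIRED };
	pte_t unwired = { PTE_VALID };
	pte_t *tbl[] = { &wired, &unwired, NULL };

	printf("wired mappings: %d\n", count_wired(tbl, 3));	/* prints 1 */
	return (0);
}

The point of the guard is simply that a missing entry and an invalid entry can be treated the same way by callers, and short-circuit evaluation keeps the combined test safe.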