Date: Wed, 04 Jul 2012 19:12:42 -0500
From: Alan Cox <alc@rice.edu>
To: freebsd-ppc@freebsd.org
Cc: Alan Cox <alc@rice.edu>
Subject: pmap patch for 32-bit AIM
Message-ID: <4FF4DBFA.4040404@rice.edu>
Can someone please verify that a 32-bit AIM system still boots up and
runs after the attached patch is applied?

Thanks,
Alan

[-- Attachment: powerpc_aim_pmap.patch --]

Index: powerpc/aim/mmu_oea.c
===================================================================
--- powerpc/aim/mmu_oea.c	(revision 237346)
+++ powerpc/aim/mmu_oea.c	(working copy)
@@ -125,6 +125,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -204,6 +205,17 @@ struct pvo_head *moea_pvo_table;	/* pvo entries b
 struct	pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
+/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+	struct rwlock	lock;
+	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define	pvh_global_lock	pvh_global.lock
+
 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
@@ -455,7 +467,7 @@
 static __inline void
 moea_attr_clear(vm_page_t m, int ptebit)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs &= ~ptebit;
 }
@@ -470,7 +482,7 @@
 static __inline void
 moea_attr_save(vm_page_t m, int ptebit)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs |= ptebit;
 }
@@ -859,6 +871,11 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart
 	CPU_FILL(&kernel_pmap->pm_active);
 	RB_INIT(&kernel_pmap->pmap_pvo);
 
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pmap pv global");
+
 	/*
 	 * Set up the Open Firmware mappings
 	 */
@@ -1066,10 +1083,10 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va,
     boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	moea_enter_locked(pmap, va, m, prot, wired);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
@@ -1102,7 +1119,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_
 		pvo_flags = PVO_MANAGED;
 	}
 	if (pmap_bootstrapped)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1166,14 +1183,14 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1182,11 +1199,11 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t
     vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot &
 	    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1342,7 +1359,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
 	powerpc_sync();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@@ -1368,7 +1385,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 			vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1409,7 +1426,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_m
 		return;
 	}
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
@@ -1429,7 +1446,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_m
 		PMAP_UNLOCK(pmap);
 	}
 	m->md.mdpg_cache_attrs = ma;
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1543,7 +1560,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_
 	    ("moea_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap) {
 			rv = TRUE;
@@ -1552,7 +1569,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
@@ -1569,11 +1586,11 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
@@ -1672,7 +1689,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva
 		return;
 	}
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1700,7 +1717,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva
 			mtx_unlock(&moea_table_mutex);
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1766,7 +1783,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva,
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1775,7 +1792,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva,
 		moea_pvo_remove(pvo, -1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1789,7 +1806,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 	struct	pvo_entry *pvo, *next_pvo;
 	pmap_t	pmap;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1804,7 +1821,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 		vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2279,7 +2296,7 @@ moea_query_bit(vm_page_t m, int ptebit)
 	if (moea_attr_fetch(m) & ptebit)
 		return (TRUE);
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 
 	/*
@@ -2288,7 +2305,7 @@ moea_query_bit(vm_page_t m, int ptebit)
 		 */
 		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 			moea_attr_save(m, ptebit);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			return (TRUE);
 		}
 	}
@@ -2312,13 +2329,13 @@ moea_query_bit(vm_page_t m, int ptebit)
 			mtx_unlock(&moea_table_mutex);
 			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 				moea_attr_save(m, ptebit);
-				vm_page_unlock_queues();
+				rw_wunlock(&pvh_global_lock);
 				return (TRUE);
 			}
 		}
 	}
 
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (FALSE);
 }
@@ -2329,7 +2346,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 
 	/*
 	 * Clear the cached value.
@@ -2363,7 +2380,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
 		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 	}
 
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
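
[Editor's note] The key idea in the patch's new pvh_global declaration is
to pad the lock out to a full cache line so that no unrelated,
frequently-written data can share its line and cause false sharing under
contention. Below is a minimal userland sketch of the same idiom,
assuming a 64-byte cache line; pthread_rwlock_t stands in for the
kernel's struct rwlock, and all names here are illustrative, not kernel
API.

#include <pthread.h>
#include <stdalign.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64	/* assumed; the kernel takes this from
				   machine/param.h */

/*
 * Pad the lock so it occupies whole cache lines by itself.  The modulo
 * keeps the pad size non-negative even on platforms where
 * pthread_rwlock_t is larger than one line (the kernel's struct rwlock
 * is not).
 */
static alignas(CACHE_LINE_SIZE) struct {
	pthread_rwlock_t lock;
	char padding[CACHE_LINE_SIZE -
	    sizeof(pthread_rwlock_t) % CACHE_LINE_SIZE];
} pvh_global = { .lock = PTHREAD_RWLOCK_INITIALIZER };

#define pvh_global_lock	pvh_global.lock

int
main(void)
{
	/* Every call site in the patch takes the lock exclusively. */
	pthread_rwlock_wrlock(&pvh_global_lock);
	/* ... mutate the shared pv-list-like state here ... */
	pthread_rwlock_unlock(&pvh_global_lock);

	printf("lock %zu bytes, padded slot %zu bytes\n",
	    sizeof(pthread_rwlock_t), sizeof(pvh_global));
	return (0);
}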
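
The call-site changes also follow one consistent protocol: wherever a
function takes both locks, the global pv list lock is write-locked
before the per-pmap lock, as in the patched moea_enter() above. The
sketch below shows that ordering discipline with stand-in userland
types; struct pmap and enter() here are hypothetical, not the kernel's.

#include <pthread.h>

static pthread_rwlock_t pvh_global_lock = PTHREAD_RWLOCK_INITIALIZER;

struct pmap {				/* hypothetical stand-in */
	pthread_mutex_t	pm_mtx;
	int		pm_mappings;
};

static void
enter(struct pmap *pm)
{
	/*
	 * Single consistent acquisition order avoids deadlock: global
	 * pv list lock first, then the pmap lock, mirroring
	 * rw_wlock() followed by PMAP_LOCK() in the patch.
	 */
	pthread_rwlock_wrlock(&pvh_global_lock);
	pthread_mutex_lock(&pm->pm_mtx);

	pm->pm_mappings++;		/* ... install the mapping ... */

	/* The patched moea_enter() drops the global lock first, too. */
	pthread_rwlock_unlock(&pvh_global_lock);
	pthread_mutex_unlock(&pm->pm_mtx);
}

int
main(void)
{
	struct pmap pm = { PTHREAD_MUTEX_INITIALIZER, 0 };

	enter(&pm);
	return (0);
}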