Date: Thu, 2 Dec 2010 02:34:39 +0000 (UTC)
From: Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r216108 - in user/nwhitehorn/ps3: conf powerpc/aim powerpc/ps3
Message-ID: <201012020234.oB22Ydab070700@svn.freebsd.org>
Author: nwhitehorn
Date: Thu Dec 2 02:34:39 2010
New Revision: 216108

URL: http://svn.freebsd.org/changeset/base/216108

Log:
  Rewrite the hypervisor page table manipulation stuff to use KOBJ, and
  split the non-hypervisor page table manipulation code into a separate
  MMU module. This makes the page table interface and HV extensions
  vastly cleaner. There seems to be no speed penalty associated with
  this; the cost of the TLB invalidations and other operations required
  in page table manipulations vastly exceeds the function call overhead,
  and, through some perverse behavior of gcc, calling through function
  pointers can somehow be faster than calling inline functions in some
  cases. Many thanks to Peter Grehan for suggesting this change.

Added:
  user/nwhitehorn/ps3/powerpc/aim/moea64_if.m
  user/nwhitehorn/ps3/powerpc/aim/moea64_native.c
Modified:
  user/nwhitehorn/ps3/conf/files.powerpc
  user/nwhitehorn/ps3/powerpc/aim/mmu_oea64.c
  user/nwhitehorn/ps3/powerpc/aim/mmu_oea64.h
  user/nwhitehorn/ps3/powerpc/ps3/mmu_ps3.c

Modified: user/nwhitehorn/ps3/conf/files.powerpc
==============================================================================
--- user/nwhitehorn/ps3/conf/files.powerpc	Thu Dec 2 02:32:46 2010	(r216107)
+++ user/nwhitehorn/ps3/conf/files.powerpc	Thu Dec 2 02:34:39 2010	(r216108)
@@ -82,6 +82,8 @@ powerpc/aim/interrupt.c		optional	aim
 powerpc/aim/locore.S		optional	aim	no-obj
 powerpc/aim/machdep.c		optional	aim
 powerpc/aim/mmu_oea.c		optional	aim powerpc
+powerpc/aim/moea64_native.c	optional	aim
+powerpc/aim/moea64_if.m		optional	aim
 powerpc/aim/mmu_oea64.c		optional	aim
 powerpc/aim/mp_cpudep.c		optional	aim	smp
 powerpc/aim/nexus.c		optional	aim

Modified: user/nwhitehorn/ps3/powerpc/aim/mmu_oea64.c
==============================================================================
--- user/nwhitehorn/ps3/powerpc/aim/mmu_oea64.c	Thu Dec 2 02:32:46 2010	(r216107)
+++ user/nwhitehorn/ps3/powerpc/aim/mmu_oea64.c	Thu Dec 2 02:34:39 2010	(r216108)
@@ -157,78 +157,11 @@ __FBSDID("$FreeBSD$");
 #include "mmu_oea64.h"
 #include "mmu_if.h"
+#include "moea64_if.h"

-#define	MOEA_DEBUG
-
-#define TODO	panic("%s: not implemented", __func__);
 void moea64_release_vsid(uint64_t vsid);
 uintptr_t moea64_get_unique_vsid(void);

-static __inline register_t
-cntlzd(volatile register_t a) {
-	register_t b;
-	__asm ("cntlzd %0, %1" : "=r"(b) : "r"(a));
-	return b;
-}
-
-#define	PTESYNC()	__asm __volatile("ptesync");
-#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
-#define	SYNC()		__asm __volatile("sync");
-#define	EIEIO()		__asm __volatile("eieio");
-
-/*
- * The tlbie instruction must be executed in 64-bit mode
- * so we have to twiddle MSR[SF] around every invocation.
- * Just to add to the fun, exceptions must be off as well
- * so that we can't trap in 64-bit mode. What a pain.
- */
-struct mtx	tlbie_mutex;
-
-static __inline void
-TLBIE(uint64_t vpn) {
-#ifndef __powerpc64__
-	register_t vpn_hi, vpn_lo;
-	register_t msr;
-	register_t scratch;
-#endif
-
-	vpn <<= ADDR_PIDX_SHFT;
-	vpn &= ~(0xffffULL << 48);
-
-	mtx_lock_spin(&tlbie_mutex);
-#ifdef __powerpc64__
-	__asm __volatile("\
-	    ptesync; \
-	    tlbie %0; \
-	    eieio; \
-	    tlbsync; \
-	    ptesync;"
-	:: "r"(vpn) : "memory");
-#else
-	vpn_hi = (uint32_t)(vpn >> 32);
-	vpn_lo = (uint32_t)vpn;
-
-	__asm __volatile("\
-	    mfmsr %0; \
-	    mr %1, %0; \
-	    insrdi %1,%5,1,0; \
-	    mtmsrd %1; isync; \
-	    ptesync; \
-	    \
-	    sld %1,%2,%4; \
-	    or %1,%1,%3; \
-	    tlbie %1; \
-	    \
-	    mtmsrd %0; isync; \
-	    eieio; \
-	    tlbsync; \
-	    ptesync;"
-	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
-	: "memory");
-#endif
-	mtx_unlock_spin(&tlbie_mutex);
-}
-
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
 #define ENABLE_TRANS(msr)	mtmsr(msr); isync()

@@ -319,8 +252,8 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_pv
     &moea64_pvo_remove_calls, 0, "");

 vm_offset_t	moea64_scratchpage_va[2];
-uint64_t	moea64_scratchpage_vpn[2];
-struct	lpte *moea64_scratchpage_pte[2];
+struct	pvo_entry *moea64_scratchpage_pvo[2];
+uintptr_t	moea64_scratchpage_pte[2];
 struct	mtx	moea64_scratchpage_mtx;

 uint64_t	moea64_large_page_mask = 0;
@@ -328,49 +261,23 @@ int		moea64_large_page_size = 0;
 int		moea64_large_page_shift = 0;

 /*
- * Hypervisor Hooks
- */
-void	(*moea64_pte_synch_hook)(struct lpte *pt, struct lpte *pvo_pt) = NULL;
-void	(*moea64_pte_clear_hook)(struct lpte *pt, struct lpte *pvo_pt,
-	    uint64_t vpn, u_int64_t ptebit) = NULL;
-void	(*moea64_pte_unset_hook)(struct lpte *pt, struct lpte *pvo_pt,
-	    uint64_t vpn) = NULL;
-void	(*moea64_pte_change_hook)(struct lpte *pt, struct lpte *pvo_pt,
-	    uint64_t vpn) = NULL;
-int	(*moea64_pte_insert_hook)(u_int ptegidx, struct lpte *pvo_pt) = NULL;
-struct	lpte *(*moea64_pvo_to_pte_hook)(const struct pvo_entry *pvo) = NULL;
-
-/*
- * PTE calls.
- */
-static	int	moea64_pte_insert_native(u_int, struct lpte *);
-
-/*
  * PVO calls.
  */
-static	int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
+static	int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
-static	void	moea64_pvo_remove(struct pvo_entry *);
+static	void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
 static	struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
-static	struct	lpte *moea64_pvo_to_pte_native(const struct pvo_entry *);

 /*
  * Utility routines.
 */
-static	void		moea64_bootstrap_native(mmu_t mmup,
-			    vm_offset_t kernelstart, vm_offset_t kernelend);
-static	void		moea64_cpu_bootstrap(mmu_t, int ap);
-static	void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-			    vm_prot_t, boolean_t);
-static	boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
-static	u_int		moea64_clear_bit(vm_page_t, u_int64_t);
+static	void		moea64_enter_locked(mmu_t, pmap_t, vm_offset_t,
+			    vm_page_t, vm_prot_t, boolean_t);
+static	boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
+static	u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
 static	void		moea64_kremove(mmu_t, vm_offset_t);
-static	void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
+static	void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);
-static	void		tlbia(void);
-#ifdef __powerpc64__
-static	void		slbia(void);
-#endif

 /*
  * Kernel MMU interface
@@ -453,8 +360,6 @@ static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
-	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
-	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
@@ -466,7 +371,7 @@ static mmu_method_t moea64_methods[] = {
	{ 0, 0 }
 };

-MMU_DEF(oea64_mmu, MMU_TYPE_G5, moea64_methods, 0);
+MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

 static __inline u_int
 va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
@@ -532,81 +437,6 @@ moea64_pte_create(struct lpte *pt, uint6
	pt->pte_lo = pte_lo;
 }

-static __inline void
-moea64_pte_synch_native(struct lpte *pt, struct lpte *pvo_pt)
-{
-
-	ASSERT_TABLE_LOCK();
-
-	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
-}
-
-static __inline void
-moea64_pte_clear_native(struct lpte *pt, uint64_t vpn, u_int64_t ptebit)
-{
-	ASSERT_TABLE_LOCK();
-
-	/*
-	 * As shown in Section 7.6.3.2.3
-	 */
-	pt->pte_lo &= ~ptebit;
-	TLBIE(vpn);
-}
-
-static __inline void
-moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
-{
-
-	ASSERT_TABLE_LOCK();
-	pvo_pt->pte_hi |= LPTE_VALID;
-
-	/*
-	 * Update the PTE as defined in section 7.6.3.1.
-	 * Note that the REF/CHG bits are from pvo_pt and thus should have
-	 * been saved so this routine can restore them (if desired).
-	 */
-	pt->pte_lo = pvo_pt->pte_lo;
-	EIEIO();
-	pt->pte_hi = pvo_pt->pte_hi;
-	PTESYNC();
-	moea64_pte_valid++;
-}
-
-static __inline void
-moea64_pte_unset_native(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
-{
-	ASSERT_TABLE_LOCK();
-	pvo_pt->pte_hi &= ~LPTE_VALID;
-
-	/*
-	 * Force the reg & chg bits back into the PTEs.
-	 */
-	SYNC();
-
-	/*
-	 * Invalidate the pte.
-	 */
-	pt->pte_hi &= ~LPTE_VALID;
-	TLBIE(vpn);
-
-	/*
-	 * Save the reg & chg bits.
- */
-	moea64_pte_synch_native(pt, pvo_pt);
-	moea64_pte_valid--;
-}
-
-static __inline void
-moea64_pte_change_native(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
-{
-
-	/*
-	 * Invalidate the PTE
-	 */
-	moea64_pte_unset_native(pt, pvo_pt, vpn);
-	moea64_pte_set_native(pt, pvo_pt);
-}
-
 static __inline uint64_t
 moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
 {
@@ -686,49 +516,6 @@ om_cmp(const void *a, const void *b)
 }

 static void
-moea64_cpu_bootstrap(mmu_t mmup, int ap)
-{
-	int i = 0;
-	#ifdef __powerpc64__
-	struct slb *slb = PCPU_GET(slb);
-	#endif
-
-	/*
-	 * Initialize segment registers and MMU
-	 */
-
-	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();
-
-	/*
-	 * Install kernel SLB entries
-	 */
-
-	#ifdef __powerpc64__
-	slbia();
-
-	for (i = 0; i < 64; i++) {
-		if (!(slb[i].slbe & SLBE_VALID))
-			continue;
-
-		__asm __volatile ("slbmte %0, %1" ::
-		    "r"(slb[i].slbv), "r"(slb[i].slbe));
-	}
-	#else
-	for (i = 0; i < 16; i++)
-		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
-	#endif
-
-	/*
-	 * Install page table
-	 */
-
-	__asm __volatile ("ptesync; mtsdr1 %0; isync"
-	    :: "r"((uintptr_t)moea64_pteg_table
-		| (64 - cntlzd(moea64_pteg_mask >> 11))));
-	tlbia();
-}
-
-static void
 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
 {
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
@@ -864,7 +651,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
		  pregions[i].mr_start + pregions[i].mr_size)
			pte_lo |= LPTE_G;

-		moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
+		moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
		    &moea64_pvo_kunmanaged, pa, pa,
		    pte_lo, PVO_WIRED | PVO_LARGE |
		    VM_PROT_EXECUTE);
@@ -1004,87 +791,6 @@ moea64_early_bootstrap(mmu_t mmup, vm_of
 #endif /* PTEGCOUNT */
 }

-static void
-moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
-	vm_offset_t kernelend)
-{
-	int		i;
-	vm_size_t	size;
-	register_t	msr;
-
-	moea64_early_bootstrap(mmup, kernelstart, kernelend);
-
-	/*
-	 * Allocate PTEG table.
-	 */
-
-	size = moea64_pteg_count * sizeof(struct lpteg);
-	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
-	    moea64_pteg_count, size);
-
-	/*
-	 * We now need to allocate memory. This memory, to be allocated,
-	 * has to reside in a page table. The page table we are about to
-	 * allocate. We don't have BAT. So drop to data real mode for a minute
-	 * as a measure of last resort. We do this a couple times.
-	 */
-
-	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
-	DISABLE_TRANS(msr);
-	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
-	ENABLE_TRANS(msr);
-
-	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
-
-	/*
-	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
-	 */
-	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);
-
-	moea64_late_bootstrap(mmup, kernelstart, kernelend);
-
-	/*
-	 * Allocate some things for page zeroing. We put this directly
-	 * in the page table, marked with LPTE_LOCKED, to avoid any
-	 * of the PVO book-keeping or other parts of the VM system
-	 * from even knowing that this hack exists.
-	 */
-
-	if (!hw_direct_map) {
-		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
-		    MTX_DEF);
-		for (i = 0; i < 2; i++) {
-			struct lpte pt;
-			uint64_t vsid;
-			int pteidx, ptegidx;
-
-			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
-			virtual_end -= PAGE_SIZE;
-
-			LOCK_TABLE();
-
-			vsid = va_to_vsid(kernel_pmap,
-			    moea64_scratchpage_va[i]);
-			moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
-			    LPTE_NOEXEC, 0);
-			pt.pte_hi |= LPTE_LOCKED;
-
-			moea64_scratchpage_vpn[i] = (vsid << 16) |
-			    ((moea64_scratchpage_va[i] & ADDR_PIDX) >>
-			    ADDR_PIDX_SHFT);
-			ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i], 0);
-			pteidx = moea64_pte_insert_native(ptegidx, &pt);
-			if (pt.pte_hi & LPTE_HID)
-				ptegidx ^= moea64_pteg_mask;
-
-			moea64_scratchpage_pte[i] =
-			    &moea64_pteg_table[ptegidx].pt[pteidx];
-
-			UNLOCK_TABLE();
-		}
-	}
-}
-
 void
 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
@@ -1269,6 +975,36 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
+
+	/*
+	 * Allocate some things for page zeroing. We put this directly
+	 * in the page table, marked with LPTE_LOCKED, to avoid any
+	 * of the PVO book-keeping or other parts of the VM system
+	 * from even knowing that this hack exists.
+	 */
+
+	if (!hw_direct_map) {
+		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
+		    MTX_DEF);
+		for (i = 0; i < 2; i++) {
+			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
+			virtual_end -= PAGE_SIZE;
+
+			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
+
+			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
+			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
+			LOCK_TABLE();
+			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
+			    mmup, moea64_scratchpage_pvo[i]);
+			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
+			    |= LPTE_LOCKED;
+			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
+			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
+			    moea64_scratchpage_pvo[i]->pvo_vpn);
+			UNLOCK_TABLE();
+		}
+	}
 }

 /*
@@ -1308,7 +1044,7 @@ void
 moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
 {
	struct	pvo_entry *pvo;
-	struct	lpte *pt;
+	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

@@ -1317,10 +1053,7 @@ moea64_change_wiring(mmu_t mmu, pmap_t p

	if (pvo != NULL) {
		LOCK_TABLE();
-		if (moea64_pvo_to_pte_hook != NULL)
-			pt = moea64_pvo_to_pte_hook(pvo);
-		else
-			pt = moea64_pvo_to_pte_native(pvo);
+		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
@@ -1334,14 +1067,10 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

-		if (pt != NULL) {
+		if (pt != -1) {
			/* Update wiring flag in page table. */
-			if (moea64_pte_change_hook != NULL)
-				moea64_pte_change_hook(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
-			else
-				moea64_pte_change_native(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
+			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
@@ -1351,13 +1080,8 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

-			if (moea64_pte_insert_hook != NULL)
-				i = moea64_pte_insert_hook(ptegidx,
-				    &pvo->pvo_pte.lpte);
-			else
-				i = moea64_pte_insert_native(ptegidx,
-				    &pvo->pvo_pte.lpte);
-
+			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
+
			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
@@ -1377,22 +1101,18 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
 */

 static __inline
-void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
+void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

-	moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
-	TLBIE(moea64_scratchpage_vpn[which]);
-
-	moea64_scratchpage_pte[which]->pte_lo &=
+	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
-	moea64_scratchpage_pte[which]->pte_lo |=
+	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
-	EIEIO();
-
-	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
-	PTESYNC(); isync();
+	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
+	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
+	    moea64_scratchpage_pvo[which]->pvo_vpn);
 }

 void
@@ -1409,8 +1129,8 @@ moea64_copy_page(mmu_t mmu, vm_page_t ms
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

-		moea64_set_scratchpage_pa(0,src);
-		moea64_set_scratchpage_pa(1,dst);
+		moea64_set_scratchpage_pa(mmu, 0, src);
+		moea64_set_scratchpage_pa(mmu, 1, dst);

		kcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
@@ -1433,7 +1153,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
-		moea64_set_scratchpage_pa(0,pa);
+		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
@@ -1454,7 +1174,7 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);
-		moea64_set_scratchpage_pa(0,pa);
+		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
@@ -1486,7 +1206,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
-	moea64_enter_locked(pmap, va, m, prot, wired);
+	moea64_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
 }
@@ -1500,8 +1220,8 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
 */

 static void
-moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
@@ -1555,20 +1275,20 @@ moea64_enter_locked(pmap_t pmap, vm_offs
	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

-	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
-	    pte_lo, pvo_flags);
+	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
+	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
-	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
-	}
+	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
+		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 }

 static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
+moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
+    vm_size_t sz)
 {

	/*
@@ -1595,7 +1315,7 @@ moea64_syncicache(pmap_t pmap, vm_offset

		mtx_lock(&moea64_scratchpage_mtx);

-		moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF);
+		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

@@ -1627,7 +1347,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-		moea64_enter_locked(pm, start + ptoa(diff), m, prot &
+		moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
@@ -1642,8 +1362,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm,

	vm_page_lock_queues();
	PMAP_LOCK(pm);
-	moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    FALSE);
+	moea64_enter_locked(mmu, pm, va, m,
+	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
 }
@@ -1696,6 +1416,8 @@ retry:
	return (m);
 }

+static mmu_t installed_mmu;
+
 static void *
 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
@@ -1736,7 +1458,7 @@ moea64_uma_page_alloc(uma_zone_t zone, i

	va = VM_PAGE_TO_PHYS(m);

-	moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
+	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
	    PVO_WIRED | PVO_BOOTSTRAP);

@@ -1763,6 +1485,7 @@ moea64_init(mmu_t mmu)
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
+		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}
@@ -1776,7 +1499,7 @@ moea64_is_referenced(mmu_t mmu, vm_page_

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
-	return (moea64_query_bit(m, PTE_REF));
+	return (moea64_query_bit(mmu, m, PTE_REF));
 }

 boolean_t
@@ -1795,7 +1518,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return (FALSE);
-	return (moea64_query_bit(m, LPTE_CHG));
+	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }

 boolean_t
@@ -1817,7 +1540,7 @@ moea64_clear_reference(mmu_t mmu, vm_pag

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_clear_reference: page %p is not managed", m));
-	moea64_clear_bit(m, LPTE_REF);
+	moea64_clear_bit(mmu, m, LPTE_REF);
 }

 void
@@ -1837,7 +1560,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t
	 */
	if ((m->flags & PG_WRITEABLE) == 0)
		return;
-	moea64_clear_bit(m, LPTE_CHG);
+	moea64_clear_bit(mmu, m, LPTE_CHG);
 }

 /*
@@ -1847,7 +1570,7 @@ void
 moea64_remove_write(mmu_t mmu, vm_page_t m)
 {
	struct	pvo_entry *pvo;
-	struct	lpte *pt;
+	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

@@ -1865,31 +1588,21 @@ moea64_remove_write(mmu_t mmu, vm_page_t
		return;
	vm_page_lock_queues();
	lo = moea64_attr_fetch(m);
-	SYNC();
+	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
-			if (moea64_pvo_to_pte_hook != NULL)
-				pt = moea64_pvo_to_pte_hook(pvo);
-			else
-				pt = moea64_pvo_to_pte_native(pvo);
+			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
-			if (pt != NULL) {
-				if (moea64_pte_synch_hook != NULL)
-					moea64_pte_synch_hook(pt, &pvo->pvo_pte.lpte);
-				else
-					moea64_pte_synch_native(pt, &pvo->pvo_pte.lpte);
+			if (pt != -1) {
+				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
-				if (moea64_pte_change_hook != NULL)
-					moea64_pte_change_hook(pt,
-					    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
-				else
-					moea64_pte_change_native(pt,
-					    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
+				MOEA64_PTE_CHANGE(mmu, pt,
+				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
@@ -1923,7 +1636,7 @@ moea64_ts_referenced(mmu_t mmu, vm_page_

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
-	return (moea64_clear_bit(m, LPTE_REF));
+	return (moea64_clear_bit(mmu, m, LPTE_REF));
 }

 /*
@@ -1934,7 +1647,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 {
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
-	struct	lpte *pt;
+	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

@@ -1950,19 +1663,12 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
-		if (moea64_pvo_to_pte_hook != NULL)
-			pt = moea64_pvo_to_pte_hook(pvo);
-		else
-			pt = moea64_pvo_to_pte_native(pvo);
+		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
-		if (pt != NULL) {
-			if (moea64_pte_change_hook != NULL)
-				moea64_pte_change_hook(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
-			else
-				moea64_pte_change_native(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
+		if (pt != -1) {
+			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
@@ -1985,7 +1691,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
	pte_lo = moea64_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
-	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
+	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, pa, pte_lo,
	    PVO_WIRED | VM_PROT_EXECUTE);

@@ -1996,9 +1702,8 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
	/*
	 * Flush the memory from the instruction cache.
	 */
-	if ((pte_lo & (LPTE_I | LPTE_G)) == 0) {
+	if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
		__syncicache((void *)va, PAGE_SIZE);
-	}
	PMAP_UNLOCK(kernel_pmap);
 }

@@ -2227,7 +1932,7 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
    vm_prot_t prot)
 {
	struct	pvo_entry *pvo;
-	struct	lpte *pt;
+	uintptr_t pt;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

@@ -2253,10 +1958,7 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
		 * copy.
		 */
		LOCK_TABLE();
-		if (moea64_pvo_to_pte_hook != NULL)
-			pt = moea64_pvo_to_pte_hook(pvo);
-		else
-			pt = moea64_pvo_to_pte_native(pvo);
+		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		/*
		 * Change the protection of the page.
@@ -2270,16 +1972,12 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
-		if (pt != NULL) {
-			if (moea64_pte_change_hook != NULL)
-				moea64_pte_change_hook(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
-			else
-				moea64_pte_change_native(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_vpn);
+		if (pt != -1) {
+			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+			    pvo->pvo_vpn);
			if ((pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-				moea64_syncicache(pm, sva,
+				moea64_syncicache(mmu, pm, sva,
				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
				    PAGE_SIZE);
			}
@@ -2366,7 +2064,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(pm, sva);
		if (pvo != NULL)
-			moea64_pvo_remove(pvo);
+			moea64_pvo_remove(mmu, pvo);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
@@ -2391,7 +2089,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
-		moea64_pvo_remove(pvo);
+		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
@@ -2448,53 +2146,10 @@ moea64_bootstrap_alloc(vm_size_t size, u
	panic("moea64_bootstrap_alloc: could not allocate memory");
 }

-static void
-tlbia(void)
-{
-	vm_offset_t i;
-	#ifndef __powerpc64__
-	register_t msr, scratch;
-	#endif
-
-	TLBSYNC();
-
-	for (i = 0; i < 0xFF000; i += 0x00001000) {
-		#ifdef __powerpc64__
-		__asm __volatile("tlbiel %0" :: "r"(i));
-		#else
-		__asm __volatile("\
-		    mfmsr %0; \
-		    mr %1, %0; \
-		    insrdi %1,%3,1,0; \
-		    mtmsrd %1; \
-		    isync; \
-		    \
-		    tlbiel %2; \
-		    \
-		    mtmsrd %0; \
-		    isync;"
-		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
-		#endif
-	}
-
-	EIEIO();
-	TLBSYNC();
-}
-
-#ifdef __powerpc64__
-static void
-slbia(void)
-{
-	register_t seg0;
-
-	__asm __volatile ("slbia");
-	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
-}
-#endif
-
 static int
-moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
-    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
+moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
+    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
+    uint64_t pte_lo, int flags)
 {
	struct	pvo_entry *pvo;
	uint64_t vsid;
@@ -2540,12 +2195,8 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
		    (pte_lo & LPTE_PP)) {
			if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
				/* Re-insert if spilled */
-				if (moea64_pte_insert_hook != NULL)
-					i = moea64_pte_insert_hook(ptegidx,
-					    &pvo->pvo_pte.lpte);
-				else
-					i = moea64_pte_insert_native(ptegidx,
-					    &pvo->pvo_pte.lpte);
+				i = MOEA64_PTE_INSERT(mmu, ptegidx,
+				    &pvo->pvo_pte.lpte);
				if (i >= 0)
					PVO_PTEGIDX_SET(pvo, i);
				moea64_pte_overflow--;
@@ -2553,7 +2204,7 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
			UNLOCK_TABLE();
			return (0);
			}
-			moea64_pvo_remove(pvo);
+			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}
@@ -2628,10 +2279,7 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
	/*
	 * We hope this succeeds but it isn't required.
	 */
-	if (moea64_pte_insert_hook != NULL)
-		i = moea64_pte_insert_hook(ptegidx, &pvo->pvo_pte.lpte);
-	else
-		i = moea64_pte_insert_native(ptegidx, &pvo->pvo_pte.lpte);
+	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
@@ -2657,24 +2305,18 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 }

 static void
-moea64_pvo_remove(struct pvo_entry *pvo)
+moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
 {
-	struct	lpte *pt;
+	uintptr_t pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & cfg bits).
	 */
	LOCK_TABLE();
-	if (moea64_pvo_to_pte_hook != NULL)
-		pt = moea64_pvo_to_pte_hook(pvo);
-	else
-		pt = moea64_pvo_to_pte_native(pvo);
-	if (pt != NULL) {
-		if (moea64_pte_unset_hook != NULL)
-			moea64_pte_unset_hook(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
-		else
-			moea64_pte_unset_native(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
+	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+	if (pt != -1) {
+		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
@@ -2763,75 +2405,6 @@ moea64_pvo_find_va(pmap_t pm, vm_offset_
	return (pvo);
 }

-static struct lpte *
-moea64_pvo_to_pte_native(const struct pvo_entry *pvo)
-{
-	struct lpte	*pt;
-	int		pteidx, ptegidx;
-	uint64_t	vsid;
-
-	ASSERT_TABLE_LOCK();

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
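
[Editor's note on the pattern, for readers unfamiliar with the KOBJ dispatch the log
message refers to: the hunks above replace per-operation hook pointers
(moea64_pte_change_hook and friends, each guarded by an if/else NULL test) with
methods that are declared in the new moea64_if.m and invoked uniformly through
MOEA64_PTE_CHANGE(), MOEA64_PTE_INSERT(), MOEA64_PTE_UNSET(), MOEA64_PTE_SYNCH()
and MOEA64_PVO_TO_PTE(), so the native page-table code (moea64_native.c) and the
PS3 hypervisor code (mmu_ps3.c) become interchangeable MMU modules. The fragment
below is only an illustrative sketch of that dispatch style in plain C; the type
and function names in it are invented for the example and are not the kernel's
kobj(9) machinery or the committed moea64_if.m.]

/*
 * Illustrative sketch only: a single per-implementation method table
 * replaces the old "if (hook != NULL) hook(...); else native(...);" pattern.
 * All names here (moea64_pte_ops, moea64_ops, moea64_pte_change) are
 * hypothetical and exist only for this example.
 */
#include <stdint.h>

struct lpte;		/* hardware PTE image; opaque for this sketch */
struct pvo_entry;	/* per-mapping bookkeeping entry; opaque here */

struct moea64_pte_ops {
	uintptr_t (*pvo_to_pte)(const struct pvo_entry *pvo);
	int       (*pte_insert)(unsigned ptegidx, struct lpte *pvo_pt);
	void      (*pte_change)(uintptr_t pt, struct lpte *pvo_pt, uint64_t vpn);
};

/* Selected once at boot: native page tables or the PS3 hypervisor backend. */
static const struct moea64_pte_ops *moea64_ops;

static inline void
moea64_pte_change(uintptr_t pt, struct lpte *pvo_pt, uint64_t vpn)
{
	/* One unconditional indirect call; no per-call hook/NULL test. */
	moea64_ops->pte_change(pt, pvo_pt, vpn);
}

[In the committed change the equivalent call stubs are generated from
moea64_if.m, presumably by the same tooling that already builds mmu_if.h from
mmu_if.m; a visible side effect in the diff is that page-table entries are now
handled as opaque uintptr_t handles, with -1 meaning "not currently in the page
table", instead of raw struct lpte pointers compared against NULL.]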