Date: Thu, 14 Jun 2018 17:23:52 +0000 (UTC)
From: Justin Hibbits <jhibbits@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r335160 - in head/sys: conf powerpc/aim powerpc/include
Message-ID: <201806141723.w5EHNqG5098421@repo.freebsd.org>
Author: jhibbits
Date: Thu Jun 14 17:23:51 2018
New Revision: 335160
URL: https://svnweb.freebsd.org/changeset/base/335160

Log:
  Split the PowerISA 3.0 HPT implementation from the historic one.

  PowerISA 3.0 makes several changes to not only the format of the HPT,
  but also the behavior surrounding it.  For instance, TLBIE no longer
  requires serialization.  Removing this lock cuts buildworld time in
  half on an 18-core/72-thread POWER9 system, demonstrating that this
  lock is highly contended on such a system.

  There was odd behavior observed trying to make this change in a
  backwards-compatible manner in moea64_native.c, so the best option
  was to fully split it, and largely revert the original changes adding
  POWER9 support to the original file.

  Suggested by:  nwhitehorn

Added:
  head/sys/powerpc/aim/isa3_hashtb.c
     - copied, changed from r335146, head/sys/powerpc/aim/moea64_native.c
Modified:
  head/sys/conf/files.powerpc
  head/sys/powerpc/aim/aim_machdep.c
  head/sys/powerpc/aim/moea64_native.c
  head/sys/powerpc/include/mmuvar.h

Modified: head/sys/conf/files.powerpc
==============================================================================
--- head/sys/conf/files.powerpc    Thu Jun 14 17:21:09 2018    (r335159)
+++ head/sys/conf/files.powerpc    Thu Jun 14 17:23:51 2018    (r335160)
@@ -102,6 +102,7 @@ libkern/qdivrem.c    optional    powerpc | powerpcspe
 libkern/ucmpdi2.c    optional    powerpc | powerpcspe
 libkern/udivdi3.c    optional    powerpc | powerpcspe
 libkern/umoddi3.c    optional    powerpc | powerpcspe
+powerpc/aim/isa3_hashtb.c    optional    aim powerpc64
 powerpc/aim/locore.S    optional    aim no-obj
 powerpc/aim/aim_machdep.c    optional    aim
 powerpc/aim/mmu_oea.c    optional    aim powerpc

Modified: head/sys/powerpc/aim/aim_machdep.c
==============================================================================
--- head/sys/powerpc/aim/aim_machdep.c    Thu Jun 14 17:21:09 2018    (r335159)
+++ head/sys/powerpc/aim/aim_machdep.c    Thu Jun 14 17:23:51 2018    (r335160)
@@ -420,7 +420,9 @@ aim_cpu_init(vm_offset_t toc)
     * in case the platform module had a better idea of what we
     * should do.
     */
-    if (cpu_features & PPC_FEATURE_64)
+    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
+        pmap_mmu_install(MMU_TYPE_P9H, BUS_PROBE_GENERIC);
+    else if (cpu_features & PPC_FEATURE_64)
         pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
     else
         pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

Copied and modified: head/sys/powerpc/aim/isa3_hashtb.c (from r335146, head/sys/powerpc/aim/moea64_native.c)
==============================================================================
--- head/sys/powerpc/aim/moea64_native.c    Thu Jun 14 16:01:11 2018    (r335146, copy source)
+++ head/sys/powerpc/aim/isa3_hashtb.c    Thu Jun 14 17:23:51 2018    (r335160)
@@ -134,56 +134,13 @@ __FBSDID("$FreeBSD$");
 /* POWER9 only permits a 64k partition table size. */
 #define PART_SIZE    0x10000
 
-static int moea64_crop_tlbie;
-
 static __inline void
-TLBIE(uint64_t vpn) {
-#ifndef __powerpc64__
-    register_t vpn_hi, vpn_lo;
-    register_t msr;
-    register_t scratch, intr;
-#endif
-
-    static volatile u_int tlbie_lock = 0;
-
+TLBIE(uint64_t vpn)
+{
     vpn <<= ADDR_PIDX_SHFT;
-
-    /* Hobo spinlock: we need stronger guarantees than mutexes provide */
-    while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
-    isync(); /* Flush instruction queue once lock acquired */
-
-    if (moea64_crop_tlbie)
-        vpn &= ~(0xffffULL << 48);
-
-#ifdef __powerpc64__
     __asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
     __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
-#else
-    vpn_hi = (uint32_t)(vpn >> 32);
-    vpn_lo = (uint32_t)vpn;
-
-    intr = intr_disable();
-    __asm __volatile("\
-        mfmsr %0; \
-        mr %1, %0; \
-        insrdi %1,%5,1,0; \
-        mtmsrd %1; isync; \
-        \
-        sld %1,%2,%4; \
-        or %1,%1,%3; \
-        tlbie %1; \
-        \
-        mtmsrd %0; isync; \
-        eieio; \
-        tlbsync; \
-        ptesync;"
-        : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
-        : "memory");
-    intr_restore(intr);
-#endif
-
-    /* No barriers or special ops -- taken care of by ptesync above */
-    tlbie_lock = 0;
 }
 
 #define DISABLE_TRANS(msr)    msr = mfmsr(); mtmsr(msr & ~PSL_DR)
@@ -192,48 +149,48 @@ TLBIE(uint64_t vpn) {
 /*
  * PTEG data.
  */
-static volatile struct pate *moea64_part_table;
-static volatile struct lpte *moea64_pteg_table;
-static struct rwlock    moea64_eviction_lock;
+static volatile struct pate *isa3_part_table;
+static volatile struct lpte *isa3_hashtb_pteg_table;
+static struct rwlock    isa3_hashtb_eviction_lock;
 
 /*
  * PTE calls.
  */
-static int    moea64_pte_insert_native(mmu_t, struct pvo_entry *);
-static int64_t    moea64_pte_synch_native(mmu_t, struct pvo_entry *);
-static int64_t    moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
-static int64_t    moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
-static int64_t    moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);
+static int    isa3_hashtb_pte_insert(mmu_t, struct pvo_entry *);
+static int64_t    isa3_hashtb_pte_synch(mmu_t, struct pvo_entry *);
+static int64_t    isa3_hashtb_pte_clear(mmu_t, struct pvo_entry *, uint64_t);
+static int64_t    isa3_hashtb_pte_replace(mmu_t, struct pvo_entry *, int);
+static int64_t    isa3_hashtb_pte_unset(mmu_t mmu, struct pvo_entry *);
 
 /*
  * Utility routines.
  */
-static void    moea64_bootstrap_native(mmu_t mmup,
+static void    isa3_hashtb_bootstrap(mmu_t mmup,
             vm_offset_t kernelstart, vm_offset_t kernelend);
-static void    moea64_cpu_bootstrap_native(mmu_t, int ap);
+static void    isa3_hashtb_cpu_bootstrap(mmu_t, int ap);
 static void    tlbia(void);
 
-static mmu_method_t moea64_native_methods[] = {
+static mmu_method_t isa3_hashtb_methods[] = {
     /* Internal interfaces */
-    MMUMETHOD(mmu_bootstrap,    moea64_bootstrap_native),
-    MMUMETHOD(mmu_cpu_bootstrap,    moea64_cpu_bootstrap_native),
+    MMUMETHOD(mmu_bootstrap,    isa3_hashtb_bootstrap),
+    MMUMETHOD(mmu_cpu_bootstrap,    isa3_hashtb_cpu_bootstrap),
 
-    MMUMETHOD(moea64_pte_synch,    moea64_pte_synch_native),
-    MMUMETHOD(moea64_pte_clear,    moea64_pte_clear_native),
-    MMUMETHOD(moea64_pte_unset,    moea64_pte_unset_native),
-    MMUMETHOD(moea64_pte_replace,    moea64_pte_replace_native),
-    MMUMETHOD(moea64_pte_insert,    moea64_pte_insert_native),
+    MMUMETHOD(moea64_pte_synch,    isa3_hashtb_pte_synch),
+    MMUMETHOD(moea64_pte_clear,    isa3_hashtb_pte_clear),
+    MMUMETHOD(moea64_pte_unset,    isa3_hashtb_pte_unset),
+    MMUMETHOD(moea64_pte_replace,    isa3_hashtb_pte_replace),
+    MMUMETHOD(moea64_pte_insert,    isa3_hashtb_pte_insert),
 
     { 0, 0 }
 };
 
-MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
+MMU_DEF_INHERIT(isa3_mmu_native, MMU_TYPE_P9H, isa3_hashtb_methods,
     0, oea64_mmu);
 
 static int64_t
-moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
+isa3_hashtb_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
 {
-    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+    volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot;
     struct lpte properpt;
     uint64_t ptelo;
 
@@ -241,26 +198,26 @@ moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *p
 
     moea64_pte_from_pvo(pvo, &properpt);
 
-    rw_rlock(&moea64_eviction_lock);
+    rw_rlock(&isa3_hashtb_eviction_lock);
     if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
         (properpt.pte_hi & LPTE_AVPN_MASK)) {
         /* Evicted */
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
         return (-1);
     }
 
     PTESYNC();
     ptelo = be64toh(pt->pte_lo);
 
-    rw_runlock(&moea64_eviction_lock);
+    rw_runlock(&isa3_hashtb_eviction_lock);
 
     return (ptelo & (LPTE_REF | LPTE_CHG));
 }
 
 static int64_t
-moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
+isa3_hashtb_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
 {
-    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+    volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot;
     struct lpte properpt;
     uint64_t ptelo;
 
@@ -268,11 +225,11 @@ moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *p
 
     moea64_pte_from_pvo(pvo, &properpt);
 
-    rw_rlock(&moea64_eviction_lock);
+    rw_rlock(&isa3_hashtb_eviction_lock);
     if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
         (properpt.pte_hi & LPTE_AVPN_MASK)) {
         /* Evicted */
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
         return (-1);
     }
 
@@ -289,35 +246,35 @@ moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *p
 #else
         ((uint8_t *)(&properpt.pte_lo))[1];
 #endif
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
 
         critical_enter();
         TLBIE(pvo->pvo_vpn);
         critical_exit();
     } else {
-        rw_runlock(&moea64_eviction_lock);
-        ptelo = moea64_pte_unset_native(mmu, pvo);
-        moea64_pte_insert_native(mmu, pvo);
+        rw_runlock(&isa3_hashtb_eviction_lock);
+        ptelo = isa3_hashtb_pte_unset(mmu, pvo);
+        isa3_hashtb_pte_insert(mmu, pvo);
     }
 
     return (ptelo & (LPTE_REF | LPTE_CHG));
 }
 
 static int64_t
-moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
+isa3_hashtb_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
 {
-    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+    volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot;
     struct lpte properpt;
     uint64_t ptelo;
 
     moea64_pte_from_pvo(pvo, &properpt);
 
-    rw_rlock(&moea64_eviction_lock);
+    rw_rlock(&isa3_hashtb_eviction_lock);
     if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) !=
         (properpt.pte_hi & LPTE_AVPN_MASK)) {
         /* Evicted */
         moea64_pte_overflow--;
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
         return (-1);
     }
 
@@ -333,7 +290,7 @@ moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *p
     ptelo = be64toh(pt->pte_lo);
     *((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
     critical_exit();
-    rw_runlock(&moea64_eviction_lock);
+    rw_runlock(&isa3_hashtb_eviction_lock);
 
     /* Keep statistics */
     moea64_pte_valid--;
@@ -342,9 +299,9 @@ moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *p
 }
 
 static int64_t
-moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
+isa3_hashtb_pte_replace(mmu_t mmu, struct pvo_entry *pvo, int flags)
 {
-    volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+    volatile struct lpte *pt = isa3_hashtb_pteg_table + pvo->pvo_pte.slot;
     struct lpte properpt;
     int64_t ptelo;
 
@@ -352,32 +309,30 @@ moea64_pte_replace_native(mmu_t mmu, struct pvo_entry
         /* Just some software bits changing. */
         moea64_pte_from_pvo(pvo, &properpt);
 
-        rw_rlock(&moea64_eviction_lock);
+        rw_rlock(&isa3_hashtb_eviction_lock);
         if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
             (properpt.pte_hi & LPTE_AVPN_MASK)) {
-            rw_runlock(&moea64_eviction_lock);
+            rw_runlock(&isa3_hashtb_eviction_lock);
             return (-1);
         }
         pt->pte_hi = htobe64(properpt.pte_hi);
         ptelo = be64toh(pt->pte_lo);
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
     } else {
         /* Otherwise, need reinsertion and deletion */
-        ptelo = moea64_pte_unset_native(mmu, pvo);
-        moea64_pte_insert_native(mmu, pvo);
+        ptelo = isa3_hashtb_pte_unset(mmu, pvo);
+        isa3_hashtb_pte_insert(mmu, pvo);
     }
 
     return (ptelo);
 }
 
 static void
-moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
+isa3_hashtb_cpu_bootstrap(mmu_t mmup, int ap)
 {
     int i = 0;
-    #ifdef __powerpc64__
     struct slb *slb = PCPU_GET(aim.slb);
     register_t seg0;
-    #endif
 
     /*
      * Initialize segment registers and MMU
@@ -395,41 +350,30 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
      * Install kernel SLB entries
      */
 
-    #ifdef __powerpc64__
-    __asm __volatile ("slbia");
-    __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
-        "r"(0));
+    __asm __volatile ("slbia");
+    __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
+        "r"(0));
 
-    for (i = 0; i < n_slbs; i++) {
-        if (!(slb[i].slbe & SLBE_VALID))
-            continue;
+    for (i = 0; i < n_slbs; i++) {
+        if (!(slb[i].slbe & SLBE_VALID))
+            continue;
 
-        __asm __volatile ("slbmte %0, %1" ::
-            "r"(slb[i].slbv), "r"(slb[i].slbe));
-    }
-    #else
-    for (i = 0; i < 16; i++)
-        mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
-    #endif
+        __asm __volatile ("slbmte %0, %1" ::
+            "r"(slb[i].slbv), "r"(slb[i].slbe));
+    }
 
     /*
      * Install page table
     */
 
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        mtspr(SPR_PTCR,
-            ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
-            flsl((PART_SIZE >> 12) - 1));
-    } else {
-        __asm __volatile ("ptesync; mtsdr1 %0; isync"
-            :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
-                | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
-    }
+    mtspr(SPR_PTCR,
+        ((uintptr_t)isa3_part_table & ~DMAP_BASE_ADDRESS) |
+        flsl((PART_SIZE >> 12) - 1));
 
     tlbia();
 }
 
 static void
-moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
+isa3_hashtb_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
     vm_offset_t kernelend)
 {
     vm_size_t    size;
@@ -439,23 +383,14 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernel
 
     moea64_early_bootstrap(mmup, kernelstart, kernelend);
 
-    switch (mfpvr() >> 16) {
-    case IBMPOWER4:
-    case IBMPOWER4PLUS:
-    case IBM970:
-    case IBM970FX:
-    case IBM970GX:
-    case IBM970MP:
-        moea64_crop_tlbie = true;
-    }
     /*
      * Allocate PTEG table.
      */
 
     size = moea64_pteg_count * sizeof(struct lpteg);
-    CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
+    CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %lu bytes",
         moea64_pteg_count, size);
 
-    rw_init(&moea64_eviction_lock, "pte eviction");
+    rw_init(&isa3_hashtb_eviction_lock, "pte eviction");
 
     /*
      * We now need to allocate memory. This memory, to be allocated,
@@ -464,38 +399,30 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernel
      * as a measure of last resort. We do this a couple times.
      */
 
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        moea64_part_table =
-            (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
-        if (hw_direct_map)
-            moea64_part_table = (struct pate *)PHYS_TO_DMAP(
-                (vm_offset_t)moea64_part_table);
-    }
+    isa3_part_table =
+        (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
+    if (hw_direct_map)
+        isa3_part_table = (struct pate *)PHYS_TO_DMAP(
+            (vm_offset_t)isa3_part_table);
 
     /*
      * PTEG table must be aligned on a 256k boundary, but can be placed
-     * anywhere with that alignment on POWER ISA 3+ systems. On earlier
-     * systems, offset addition is done by the CPU with bitwise OR rather
-     * than addition, so the table must also be aligned on a boundary of
-     * its own size. Pick the larger of the two, which works on all
-     * systems.
+     * anywhere with that alignment on POWER ISA 3+ systems.
      */
-    moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
+    isa3_hashtb_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
         MAX(256*1024, size));
     if (hw_direct_map)
-        moea64_pteg_table =
-            (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
+        isa3_hashtb_pteg_table =
+            (struct lpte *)PHYS_TO_DMAP((vm_offset_t)isa3_hashtb_pteg_table);
     DISABLE_TRANS(msr);
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
-        moea64_part_table[0].pagetab =
-            ((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS) |
-            (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
-    }
-    bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
+    bzero(__DEVOLATILE(void *, isa3_part_table), PART_SIZE);
+    isa3_part_table[0].pagetab =
+        ((uintptr_t)isa3_hashtb_pteg_table & ~DMAP_BASE_ADDRESS) |
+        (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
+    bzero(__DEVOLATILE(void *, isa3_hashtb_pteg_table), moea64_pteg_count *
         sizeof(struct lpteg));
     ENABLE_TRANS(msr);
 
-    CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
+    CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", isa3_hashtb_pteg_table);
 
     moea64_mid_bootstrap(mmup, kernelstart, kernelend);
 
@@ -504,7 +431,7 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernel
      */
     if (!hw_direct_map) {
         size = moea64_pteg_count * sizeof(struct lpteg);
-        off = (vm_offset_t)(moea64_pteg_table);
+        off = (vm_offset_t)(isa3_hashtb_pteg_table);
         DISABLE_TRANS(msr);
         for (pa = off; pa < off + size; pa += PAGE_SIZE)
             pmap_kenter(pa, pa);
@@ -519,43 +446,13 @@
 static void
 tlbia(void)
 {
     vm_offset_t i;
-    #ifndef __powerpc64__
-    register_t msr, scratch;
-    #endif
 
     i = 0xc00; /* IS = 11 */
-    switch (mfpvr() >> 16) {
-    case IBM970:
-    case IBM970FX:
-    case IBM970MP:
-    case IBM970GX:
-    case IBMPOWER4:
-    case IBMPOWER4PLUS:
-    case IBMPOWER5:
-    case IBMPOWER5PLUS:
-        i = 0; /* IS not supported */
-        break;
-    }
 
     TLBSYNC();
 
     for (; i < 0x200000; i += 0x00001000) {
-        #ifdef __powerpc64__
         __asm __volatile("tlbiel %0" :: "r"(i));
-        #else
-        __asm __volatile("\
-            mfmsr %0; \
-            mr %1, %0; \
-            insrdi %1,%3,1,0; \
-            mtmsrd %1; \
-            isync; \
-            \
-            tlbiel %2; \
-            \
-            mtmsrd %0; \
-            isync;"
-            : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
-        #endif
     }
 
     EIEIO();
@@ -598,7 +495,7 @@ atomic_pte_lock(volatile struct lpte *pte, uint64_t bi
 }
 
 static uintptr_t
-moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
+isa3_hashtb_insert_to_pteg(struct lpte *pvo_pt, uintptr_t slotbase,
     uint64_t mask)
 {
     volatile struct lpte *pt;
@@ -610,7 +507,7 @@ moea64_insert_to_pteg_native(struct lpte *pvo_pt, uint
     i = mftb() % 8;
     for (j = 0; j < 8; j++) {
         k = slotbase + (i + j) % 8;
-        pt = &moea64_pteg_table[k];
+        pt = &isa3_hashtb_pteg_table[k];
         /* Invalidate and seize lock only if no bits in mask set */
         if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
             break;
@@ -659,7 +556,7 @@ moea64_insert_to_pteg_native(struct lpte *pvo_pt, uint
 }
 
 static int
-moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
+isa3_hashtb_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
 {
     struct lpte insertpt;
     uintptr_t slot;
@@ -668,16 +565,16 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *
 
     moea64_pte_from_pvo(pvo, &insertpt);
 
     /* Make sure further insertion is locked out during evictions */
-    rw_rlock(&moea64_eviction_lock);
+    rw_rlock(&isa3_hashtb_eviction_lock);
 
     /*
      * First try primary hash.
     */
     pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
-    slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+    slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot,
         LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
     if (slot != -1) {
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
         pvo->pvo_pte.slot = slot;
         return (0);
     }
@@ -688,10 +585,10 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *
     pvo->pvo_vaddr ^= PVO_HID;
     insertpt.pte_hi ^= LPTE_HID;
     pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
-    slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+    slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot,
         LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
     if (slot != -1) {
-        rw_runlock(&moea64_eviction_lock);
+        rw_runlock(&isa3_hashtb_eviction_lock);
         pvo->pvo_pte.slot = slot;
         return (0);
     }
@@ -701,15 +598,15 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *
     */
 
     /* Lock out all insertions for a bit */
-    if (!rw_try_upgrade(&moea64_eviction_lock)) {
-        rw_runlock(&moea64_eviction_lock);
-        rw_wlock(&moea64_eviction_lock);
+    if (!rw_try_upgrade(&isa3_hashtb_eviction_lock)) {
+        rw_runlock(&isa3_hashtb_eviction_lock);
+        rw_wlock(&isa3_hashtb_eviction_lock);
     }
 
-    slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+    slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot,
         LPTE_WIRED | LPTE_LOCKED);
     if (slot != -1) {
-        rw_wunlock(&moea64_eviction_lock);
+        rw_wunlock(&isa3_hashtb_eviction_lock);
        pvo->pvo_pte.slot = slot;
        return (0);
     }
@@ -718,16 +615,16 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *
     pvo->pvo_vaddr ^= PVO_HID;
     insertpt.pte_hi ^= LPTE_HID;
     pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
-    slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+    slot = isa3_hashtb_insert_to_pteg(&insertpt, pvo->pvo_pte.slot,
         LPTE_WIRED | LPTE_LOCKED);
     if (slot != -1) {
-        rw_wunlock(&moea64_eviction_lock);
+        rw_wunlock(&isa3_hashtb_eviction_lock);
         pvo->pvo_pte.slot = slot;
         return (0);
     }
 
     /* No freeable slots in either PTEG? We're hosed. */
-    rw_wunlock(&moea64_eviction_lock);
+    rw_wunlock(&isa3_hashtb_eviction_lock);
     panic("moea64_pte_insert: overflow");
     return (-1);
 }

Modified: head/sys/powerpc/aim/moea64_native.c
==============================================================================
--- head/sys/powerpc/aim/moea64_native.c    Thu Jun 14 17:21:09 2018    (r335159)
+++ head/sys/powerpc/aim/moea64_native.c    Thu Jun 14 17:23:51 2018    (r335160)
@@ -192,7 +192,6 @@ TLBIE(uint64_t vpn) {
 /*
  * PTEG data.
  */
-static volatile struct pate *moea64_part_table;
 static volatile struct lpte *moea64_pteg_table;
 static struct rwlock    moea64_eviction_lock;
 
@@ -385,12 +384,6 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
 
     mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);
 
-    switch (mfpvr() >> 16) {
-    case IBMPOWER9:
-        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
-        break;
-    }
-
     /*
      * Install kernel SLB entries
     */
@@ -416,15 +409,9 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
      * Install page table
     */
 
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        mtspr(SPR_PTCR,
-            ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
-            flsl((PART_SIZE >> 12) - 1));
-    } else {
-        __asm __volatile ("ptesync; mtsdr1 %0; isync"
-            :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
-                | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
-    }
+    __asm __volatile ("ptesync; mtsdr1 %0; isync"
+        :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
+            | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
 
     tlbia();
 }
@@ -463,14 +450,6 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernel
      * allocate. We don't have BAT. So drop to data real mode for a minute
      * as a measure of last resort. We do this a couple times.
     */
-
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        moea64_part_table =
-            (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
-        if (hw_direct_map)
-            moea64_part_table = (struct pate *)PHYS_TO_DMAP(
-                (vm_offset_t)moea64_part_table);
-    }
 
     /*
      * PTEG table must be aligned on a 256k boundary, but can be placed
      * anywhere with that alignment on POWER ISA 3+ systems. On earlier
@@ -485,12 +464,6 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernel
         moea64_pteg_table =
             (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
     DISABLE_TRANS(msr);
-    if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
-        bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
-        moea64_part_table[0].pagetab =
-            ((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS) |
-            (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
-    }
     bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
         sizeof(struct lpteg));
     ENABLE_TRANS(msr);

Modified: head/sys/powerpc/include/mmuvar.h
==============================================================================
--- head/sys/powerpc/include/mmuvar.h    Thu Jun 14 17:21:09 2018    (r335159)
+++ head/sys/powerpc/include/mmuvar.h    Thu Jun 14 17:23:51 2018    (r335160)
@@ -116,6 +116,7 @@ DATA_SET(mmu_set, name)
 #define MMU_TYPE_BOOKE    "mmu_booke"    /* Book-E MMU specification */
 #define MMU_TYPE_OEA    "mmu_oea"    /* 32-bit OEA */
 #define MMU_TYPE_G5    "mmu_g5"    /* 64-bit bridge (ibm 970) */
+#define MMU_TYPE_P9H    "mmu_p9h"    /* 64-bit native ISA 3.0 (POWER9) hash */
 #define MMU_TYPE_8xx    "mmu_8xx"    /* 8xx quicc TLB */
 
 #endif /* _MACHINE_MMUVAR_H_ */
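The heart of the change is easiest to see with the two TLBIE() paths side by side. The sketch below is condensed from the hunks above; the _legacy/_isa3 suffixes, the ADDR_PIDX_SHFT value, and the __sync builtin standing in for the kernel's atomic_cmpset_int() are illustrative stand-ins, not names from the tree, and the 32-bit path is omitted.

#include <stdint.h>

#define ADDR_PIDX_SHFT	12	/* illustrative; the real value lives in pte.h */

static volatile unsigned int tlbie_lock = 0;

/*
 * moea64_native.c: pre-ISA-3.0 CPUs require tlbie to be serialized
 * system-wide, so every invalidation funnels through one global
 * spinlock ("hobo spinlock" in the original comment).
 */
static inline void
TLBIE_legacy(uint64_t vpn)
{
	vpn <<= ADDR_PIDX_SHFT;
	/* Spin until we own the lock; stronger guarantees than a mutex. */
	while (!__sync_bool_compare_and_swap(&tlbie_lock, 0, 1))
		;
	__asm __volatile("isync" ::: "memory");	/* flush instruction queue */
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
	tlbie_lock = 0;	/* the ptesync above is the release barrier */
}

/*
 * isa3_hashtb.c: PowerISA 3.0 drops the serialization requirement, so
 * every core may issue tlbie concurrently -- this is the lock whose
 * removal halved buildworld time on the 72-thread POWER9.
 */
static inline void
TLBIE_isa3(uint64_t vpn)
{
	vpn <<= ADDR_PIDX_SHFT;
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
}

On pre-3.0 parts the single lock serializes every invalidation in the system, which is why the log message finds it so heavily contended; on ISA 3.0 the hardware handles tlbie ordering itself, so both the lock and the acquire-side isync simply disappear.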
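The other ISA 3.0-specific detail is the PTCR write in isa3_hashtb_cpu_bootstrap(), which packs the partition table's physical base together with an encoded size field. Here is a small standalone illustration of that arithmetic, under the assumption (my reading of ISA 3.0; verify against the spec) that the low PTCR bits encode the table size as 2^(12+n) bytes; flsl_demo() mirrors the kernel's 1-based flsl() and is purely illustrative:

#include <stdio.h>

/* Index of the most significant set bit, 1-based; 0 for a zero input.
 * Same contract as the kernel flsl() used in the diff. */
static int
flsl_demo(unsigned long mask)
{
	int bit;

	for (bit = 0; mask != 0; bit++)
		mask >>= 1;
	return (bit);
}

int
main(void)
{
	unsigned long part_size = 0x10000;	/* PART_SIZE: 64k, fixed on POWER9 */

	/* flsl((PART_SIZE >> 12) - 1) == flsl(0xf) == 4, i.e. the table
	 * spans 2^(12 + 4) = 64k bytes.  This value is OR'd into SPR_PTCR
	 * alongside the table's physical address. */
	printf("PTCR size field: %d\n", flsl_demo((part_size >> 12) - 1));
	return (0);
}

Because PART_SIZE is fixed at 64k, the encoded field is always 4, matching the "POWER9 only permits a 64k partition table size" comment at the top of the new file.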