From owner-svn-src-head@FreeBSD.ORG Sat Nov 3 22:02:13 2012 Return-Path: Delivered-To: svn-src-head@freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [69.147.83.52]) by hub.freebsd.org (Postfix) with ESMTP id 5D2C8B8E; Sat, 3 Nov 2012 22:02:13 +0000 (UTC) (envelope-from marcel@FreeBSD.org) Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c]) by mx1.freebsd.org (Postfix) with ESMTP id 43F658FC08; Sat, 3 Nov 2012 22:02:13 +0000 (UTC) Received: from svn.freebsd.org (localhost [127.0.0.1]) by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id qA3M2DpE053174; Sat, 3 Nov 2012 22:02:13 GMT (envelope-from marcel@svn.freebsd.org) Received: (from marcel@localhost) by svn.freebsd.org (8.14.4/8.14.4/Submit) id qA3M2Duo053169; Sat, 3 Nov 2012 22:02:13 GMT (envelope-from marcel@svn.freebsd.org) Message-Id: <201211032202.qA3M2Duo053169@svn.freebsd.org> From: Marcel Moolenaar Date: Sat, 3 Nov 2012 22:02:13 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r242526 - head/sys/powerpc/booke X-SVN-Group: head MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: svn-src-head@freebsd.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: SVN commit messages for the src tree for head/-current List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Sat, 03 Nov 2012 22:02:13 -0000 Author: marcel Date: Sat Nov 3 22:02:12 2012 New Revision: 242526 URL: http://svn.freebsd.org/changeset/base/242526 Log: 1. Have the APs initialize the TLB1 entries from what has been programmed on the BSP during (early) boot. This makes sure that the APs get configured the same as the BSP, irrespective of how FreeBSD was loaded. 2. Make sure to flush the dcache after writing the TLB1 entries to the boot page. The APs aren't part of the coherency domain just yet. 3. Set pmap_bootstrapped after calling pmap_bootstrap(). 
The FDT code now maps the devices (like OF), and this resulted in a panic. 4. Since we pre-wire the CCSR, make sure not to map chunks of it in pmap_mapdev(). Modified: head/sys/powerpc/booke/locore.S head/sys/powerpc/booke/machdep.c head/sys/powerpc/booke/platform_bare.c head/sys/powerpc/booke/pmap.c Modified: head/sys/powerpc/booke/locore.S ============================================================================== --- head/sys/powerpc/booke/locore.S Sat Nov 3 21:20:55 2012 (r242525) +++ head/sys/powerpc/booke/locore.S Sat Nov 3 22:02:12 2012 (r242526) @@ -126,9 +126,11 @@ __start: bl tlb1_find_current /* the entry found is returned in r29 */ bl tlb1_inval_all_but_current + /* * Create temporary mapping in AS=1 and switch to it */ + addi %r3, %r29, 1 bl tlb1_temp_mapping_as1 mfmsr %r3 @@ -242,19 +244,21 @@ done_mapping: __boot_page: bl 1f - .globl bp_trace -bp_trace: + .globl bp_ntlb1s +bp_ntlb1s: .long 0 - .globl bp_kernload -bp_kernload: - .long 0 + .globl bp_tlb1 +bp_tlb1: + .space 4 * 3 * 16 + + .globl bp_tlb1_end +bp_tlb1_end: /* * Initial configuration */ -1: - mflr %r31 /* r31 hold the address of bp_trace */ +1: mflr %r31 /* r31 hold the address of bp_ntlb1s */ /* Set HIDs */ lis %r3, HID0_E500_DEFAULT_SET@h @@ -283,9 +287,11 @@ bp_kernload: bl tlb1_find_current /* the entry number found is in r29 */ bl tlb1_inval_all_but_current + /* * Create temporary translation in AS=1 and switch to it */ + lwz %r3, 0(%r31) bl tlb1_temp_mapping_as1 mfmsr %r3 @@ -306,44 +312,34 @@ bp_kernload: /* * Setup final mapping in TLB1[1] and switch to it */ - /* Final kernel mapping, map in 16 MB of RAM */ - lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ - li %r4, 0 /* Entry 0 */ - rlwimi %r3, %r4, 16, 4, 15 + lwz %r6, 0(%r31) + addi %r5, %r31, 4 + li %r4, 0 + +4: lis %r3, MAS0_TLBSEL1@h + rlwimi %r3, %r4, 16, 12, 15 mtspr SPR_MAS0, %r3 isync - - li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l - oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h - mtspr SPR_MAS1, %r3 /* note TS was not 
filled, so it's TS=0 */ + lwz %r3, 0(%r5) + mtspr SPR_MAS1, %r3 isync - - lis %r3, KERNBASE@h - ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */ - ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */ + lwz %r3, 4(%r5) mtspr SPR_MAS2, %r3 isync - - /* Retrieve kernel load [physical] address from bp_kernload */ - bl 4f -4: mflr %r3 - rlwinm %r3, %r3, 0, 0, 19 - lis %r4, bp_kernload@h - ori %r4, %r4, bp_kernload@l - lis %r5, __boot_page@h - ori %r5, %r5, __boot_page@l - sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */ - lwzx %r3, %r4, %r3 - - /* Set RPN and protection */ - ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l + lwz %r3, 8(%r5) mtspr SPR_MAS3, %r3 isync tlbwe isync msync + addi %r5, %r5, 12 + addi %r4, %r4, 1 + cmpw %r4, %r6 + blt 4b /* Switch to the final mapping */ + lis %r5, __boot_page@ha + ori %r5, %r5, __boot_page@l bl 5f 5: mflr %r3 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */ @@ -460,11 +456,14 @@ tlb1_inval_entry: blr /* - * r29 current entry number - * r28 returned temp entry - * r3-r5 scratched + * r3 entry of temp translation + * r29 entry of current translation + * r28 returns temp entry passed in r3 + * r4-r5 scratched */ tlb1_temp_mapping_as1: + mr %r28, %r3 + /* Read our current translation */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ rlwimi %r3, %r29, 16, 12, 15 /* Select our current entry */ @@ -472,14 +471,8 @@ tlb1_temp_mapping_as1: isync tlbre - /* - * Prepare and write temp entry - * - * FIXME this is not robust against overflow i.e. when the current - * entry is the last in TLB1 - */ + /* Prepare and write temp entry */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ - addi %r28, %r29, 1 /* Use next entry. 
*/ rlwimi %r3, %r28, 16, 12, 15 /* Select temp entry */ mtspr SPR_MAS0, %r3 isync Modified: head/sys/powerpc/booke/machdep.c ============================================================================== --- head/sys/powerpc/booke/machdep.c Sat Nov 3 21:20:55 2012 (r242525) +++ head/sys/powerpc/booke/machdep.c Sat Nov 3 22:02:12 2012 (r242526) @@ -413,6 +413,7 @@ booke_init(uint32_t arg1, uint32_t arg2) /* Initialise virtual memory. */ pmap_mmu_install(MMU_TYPE_BOOKE, 0); pmap_bootstrap((uintptr_t)kernel_text, end); + pmap_bootstrapped = 1; debugf("MSR = 0x%08x\n", mfmsr()); #if defined(BOOKE_E500) //tlb1_print_entries(); Modified: head/sys/powerpc/booke/platform_bare.c ============================================================================== --- head/sys/powerpc/booke/platform_bare.c Sat Nov 3 21:20:55 2012 (r242525) +++ head/sys/powerpc/booke/platform_bare.c Sat Nov 3 22:02:12 2012 (r242526) @@ -55,9 +55,11 @@ __FBSDID("$FreeBSD$"); #ifdef SMP extern void *ap_pcpu; +extern vm_paddr_t kernload; /* Kernel physical load address */ extern uint8_t __boot_page[]; /* Boot page body */ -extern uint32_t bp_kernload; /* Kernel physical load address */ -extern uint32_t bp_trace; /* AP boot trace field */ +extern uint32_t bp_ntlb1s; +extern uint32_t bp_tlb1[]; +extern uint32_t bp_tlb1_end[]; #endif extern uint32_t *bootinfo; @@ -248,8 +250,9 @@ static int bare_smp_start_cpu(platform_t plat, struct pcpu *pc) { #ifdef SMP + uint32_t *tlb1; uint32_t bptr, eebpcr; - int timeout; + int i, timeout; eebpcr = ccsr_read4(OCP85XX_EEBPCR); if ((eebpcr & (1 << (pc->pc_cpuid + 24))) != 0) { @@ -259,18 +262,37 @@ bare_smp_start_cpu(platform_t plat, stru } ap_pcpu = pc; - __asm __volatile("msync; isync"); + + i = 0; + tlb1 = bp_tlb1; + while (i < bp_ntlb1s && tlb1 < bp_tlb1_end) { + mtspr(SPR_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(i)); + __asm __volatile("isync; tlbre"); + tlb1[0] = mfspr(SPR_MAS1); + tlb1[1] = mfspr(SPR_MAS2); + tlb1[2] = mfspr(SPR_MAS3); + i++; + tlb1 += 3; + } + if (i < 
bp_ntlb1s) + bp_ntlb1s = i; /* * Set BPTR to the physical address of the boot page */ - bptr = ((uint32_t)__boot_page - KERNBASE) + bp_kernload; - ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000); + bptr = ((uint32_t)__boot_page - KERNBASE) + kernload; + KASSERT((bptr & 0xfff) == 0, + ("%s: boot page is not aligned (%#x)", __func__, bptr)); + bptr = (bptr >> 12) | 0x80000000u; + ccsr_write4(OCP85XX_BPTR, bptr); + __asm __volatile("isync; msync"); + + /* Flush caches to have our changes hit DRAM. */ + cpu_flush_dcache(__boot_page, 4096); /* * Release AP from hold-off state */ - bp_trace = 0; eebpcr |= (1 << (pc->pc_cpuid + 24)); ccsr_write4(OCP85XX_EEBPCR, eebpcr); __asm __volatile("isync; msync"); @@ -285,10 +307,10 @@ bare_smp_start_cpu(platform_t plat, stru * usable otherwise. */ ccsr_write4(OCP85XX_BPTR, 0); + __asm __volatile("isync; msync"); if (!pc->pc_awake) - printf("SMP: CPU %d didn't wake up (trace code %#x).\n", - pc->pc_awake, bp_trace); + printf("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid); return ((pc->pc_awake) ? 
0 : EBUSY); #else /* No SMP support */ Modified: head/sys/powerpc/booke/pmap.c ============================================================================== --- head/sys/powerpc/booke/pmap.c Sat Nov 3 21:20:55 2012 (r242525) +++ head/sys/powerpc/booke/pmap.c Sat Nov 3 22:02:12 2012 (r242526) @@ -111,9 +111,10 @@ extern unsigned char _end[]; extern uint32_t *bootinfo; #ifdef SMP -extern uint32_t bp_kernload; +extern uint32_t bp_ntlb1s; #endif +vm_paddr_t ccsrbar_pa; vm_paddr_t kernload; vm_offset_t kernstart; vm_size_t kernsize; @@ -962,10 +963,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset debugf("mmu_booke_bootstrap: entered\n"); -#ifdef SMP - bp_kernload = kernload; -#endif - /* Initialize invalidation mutex */ mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); @@ -1279,7 +1276,7 @@ pmap_bootstrap_ap(volatile uint32_t *trc * have the snapshot of its contents in the s/w tlb1[] table, so use * these values directly to (re)program AP's TLB1 hardware. */ - for (i = 0; i < tlb1_idx; i ++) { + for (i = bp_ntlb1s; i < tlb1_idx; i++) { /* Skip invalid entries */ if (!(tlb1[i].mas1 & MAS1_VALID)) continue; @@ -2601,6 +2598,18 @@ mmu_booke_mapdev(mmu_t mmu, vm_paddr_t p uintptr_t va; vm_size_t sz; + /* + * CCSR is premapped. Note that (pa + size - 1) is there to make sure + * we don't wrap around. Devices on the local bus typically extend all + * the way up to and including 0xffffffff. In that case (pa + size) + * would be 0. This creates a false positive (i.e. we think it's + * within the CCSR) and not create a mapping. + */ + if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) { + va = CCSRBAR_VA + (pa - ccsrbar_pa); + return ((void *)va); + } + va = (pa >= 0x80000000) ? 
pa : (0xe2000000 + pa); res = (void *)va; @@ -3011,6 +3020,8 @@ tlb1_init(vm_offset_t ccsrbar) uint32_t tsz; u_int i; + ccsrbar_pa = ccsrbar; + if (bootinfo != NULL && bootinfo[0] != 1) { tlb1_idx = *((uint16_t *)(bootinfo + 8)); } else @@ -3042,6 +3053,10 @@ tlb1_init(vm_offset_t ccsrbar) /* Map in CCSRBAR. */ tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); +#ifdef SMP + bp_ntlb1s = tlb1_idx; +#endif + /* Purge the remaining entries */ for (i = tlb1_idx; i < TLB1_ENTRIES; i++) tlb1_write_entry(i);