From owner-svn-src-all@FreeBSD.ORG Thu May 24 20:58:41 2012
From: Marcel Moolenaar <marcel@FreeBSD.org>
Date: Thu, 24 May 2012 20:58:41 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
        svn-src-head@freebsd.org
Subject: svn commit: r235932 - head/sys/powerpc/booke

Author: marcel
Date: Thu May 24 20:58:40 2012
New Revision: 235932
URL: http://svn.freebsd.org/changeset/base/235932

Log:
  o Rename kernload_ap to bp_kernload. This introduces a common prefix
    for variables that live in the boot page.
  o Add bp_trace (yes, it's in the boot page) that gets zeroed before
    we try to wake a core and to which the core being woken can write
    markers, so that we know where the core was in case it doesn't
    wake up. The boot code does not yet write markers (to follow).
  o Disable the boot page translation to allow the last 4K page to be
    used for whatever we please. It would get mapped otherwise.
  o Fix kernstart in the case of SMP. The start argument is typically
    page aligned due to the alignment requirements that come with
    having a boot page. The point of using trunc_page is that we get
    the actual load address, given that the entry point immediately
    follows the ELF headers. In the SMP case this ended up exactly
    4K after the load address. Hence the subtraction of 1 from start.
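To see why trunc_page(start - 1) recovers the load address, here is a
minimal standalone C sketch (not part of the commit; the load address
below is hypothetical, and trunc_page() is redefined locally for the 4K
Book-E page size):

/*
 * Sketch of the kernstart arithmetic. In the SMP case the entry point
 * is exactly one page past the (page-aligned) load address, so
 * trunc_page(start) is a no-op; trunc_page(start - 1) backs into the
 * ELF header page and rounds down to the load address.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~(uint32_t)PAGE_MASK)

int
main(void)
{
	uint32_t load  = 0x01000000;		/* hypothetical load address */
	uint32_t entry = load + PAGE_SIZE;	/* SMP entry point, one page in */

	printf("trunc_page(start)     = 0x%08x\n", trunc_page(entry));
	printf("trunc_page(start - 1) = 0x%08x\n", trunc_page(entry - 1));
	return (0);
}

The first printf yields 0x01001000 (a page past the load address); the
second yields 0x01000000, the actual load address.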
Modified:
  head/sys/powerpc/booke/locore.S
  head/sys/powerpc/booke/platform_bare.c
  head/sys/powerpc/booke/pmap.c

Modified: head/sys/powerpc/booke/locore.S
==============================================================================
--- head/sys/powerpc/booke/locore.S	Thu May 24 20:45:44 2012	(r235931)
+++ head/sys/powerpc/booke/locore.S	Thu May 24 20:58:40 2012	(r235932)
@@ -242,14 +242,20 @@ done_mapping:
 __boot_page:
 	bl	1f
 
-	.globl	kernload_ap
-kernload_ap:
+	.globl	bp_trace
+bp_trace:
+	.long	0
+
+	.globl	bp_kernload
+bp_kernload:
 	.long	0
 
 /*
  * Initial configuration
  */
 1:
+	mflr	%r31		/* r31 holds the address of bp_trace */
+
 	/* Set HIDs */
 	lis	%r3, HID0_E500_DEFAULT_SET@h
 	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
@@ -318,15 +324,15 @@ kernload_ap:
 	mtspr	SPR_MAS2, %r3
 	isync
 
-	/* Retrieve kernel load [physical] address from kernload_ap */
+	/* Retrieve kernel load [physical] address from bp_kernload */
 	bl	4f
 4:	mflr	%r3
 	rlwinm	%r3, %r3, 0, 0, 19
-	lis	%r4, kernload_ap@h
-	ori	%r4, %r4, kernload_ap@l
+	lis	%r4, bp_kernload@h
+	ori	%r4, %r4, bp_kernload@l
 	lis	%r5, __boot_page@h
 	ori	%r5, %r5, __boot_page@l
-	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
+	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
 	lwzx	%r3, %r4, %r3
 
 	/* Set RPN and protection */

Modified: head/sys/powerpc/booke/platform_bare.c
==============================================================================
--- head/sys/powerpc/booke/platform_bare.c	Thu May 24 20:45:44 2012	(r235931)
+++ head/sys/powerpc/booke/platform_bare.c	Thu May 24 20:58:40 2012	(r235932)
@@ -56,7 +56,8 @@ __FBSDID("$FreeBSD$");
 #ifdef SMP
 extern void *ap_pcpu;
 extern uint8_t __boot_page[];		/* Boot page body */
-extern uint32_t kernload_ap;		/* Kernel physical load address */
+extern uint32_t bp_kernload;		/* Kernel physical load address */
+extern uint32_t bp_trace;		/* AP boot trace field */
 #endif
 
 extern uint32_t *bootinfo;
@@ -262,8 +263,8 @@ bare_smp_start_cpu(platform_t plat, stru
 
 	eebpcr = ccsr_read4(OCP85XX_EEBPCR);
 	if ((eebpcr & (1 << (pc->pc_cpuid + 24))) != 0) {
-		printf("%s: CPU=%d already out of hold-off state!\n",
-		    __func__, pc->pc_cpuid);
+		printf("SMP: CPU %d already out of hold-off state!\n",
+		    pc->pc_cpuid);
 		return (ENXIO);
 	}
 
@@ -273,12 +274,13 @@ bare_smp_start_cpu(platform_t plat, stru
 	/*
 	 * Set BPTR to the physical address of the boot page
 	 */
-	bptr = ((uint32_t)__boot_page - KERNBASE) + kernload_ap;
+	bptr = ((uint32_t)__boot_page - KERNBASE) + bp_kernload;
 	ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);
 
 	/*
 	 * Release AP from hold-off state
 	 */
+	bp_trace = 0;
 	eebpcr |= (1 << (pc->pc_cpuid + 24));
 	ccsr_write4(OCP85XX_EEBPCR, eebpcr);
 	__asm __volatile("isync; msync");
@@ -287,6 +289,16 @@ bare_smp_start_cpu(platform_t plat, stru
 	while (!pc->pc_awake && timeout--)
 		DELAY(1000);	/* wait 1ms */
 
+	/*
+	 * Disable boot page translation so that the 4K page at the default
+	 * address (= 0xfffff000) isn't permanently remapped and thus not
+	 * usable otherwise.
+	 */
+	ccsr_write4(OCP85XX_BPTR, 0);
+
+	if (!pc->pc_awake)
+		printf("SMP: CPU %d didn't wake up (trace code %#x).\n",
+		    pc->pc_cpuid, bp_trace);
 	return ((pc->pc_awake) ? 0 : EBUSY);
 #else
 	/* No SMP support */

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Thu May 24 20:45:44 2012	(r235931)
+++ head/sys/powerpc/booke/pmap.c	Thu May 24 20:58:40 2012	(r235932)
@@ -115,7 +115,7 @@ extern unsigned char _end[];
 extern uint32_t *bootinfo;
 
 #ifdef SMP
-extern uint32_t kernload_ap;
+extern uint32_t bp_kernload;
 #endif
 
 vm_paddr_t kernload;
@@ -967,10 +967,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
 	debugf("mmu_booke_bootstrap: entered\n");
 
 #ifdef SMP
-	kernload_ap = kernload;
+	bp_kernload = kernload;
 #endif
 
-
 	/* Initialize invalidation mutex */
 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
 
@@ -981,8 +980,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
 	 * Align kernel start and end address (kernel image).
 	 * Note that kernel end does not necessarily relate to kernsize.
 	 * kernsize is the size of the kernel that is actually mapped.
+	 * Also note that "start - 1" is deliberate. With SMP, the
+	 * entry point is exactly a page from the actual load address.
+	 * As such, trunc_page() has no effect and we're off by a page.
+	 * Since we always have the ELF header between the load address
+	 * and the entry point, we can safely subtract 1 to compensate.
 	 */
-	kernstart = trunc_page(start);
+	kernstart = trunc_page(start - 1);
 	data_start = round_page(kernelend);
 	data_end = data_start;
 
@@ -1233,9 +1237,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
 	 * entries, but for pte_vatopa() to work correctly with kernel area
 	 * addresses.
	 */
-	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
+	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
 		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
-		pte->rpn = kernload + (va - KERNBASE);
+		pte->rpn = kernload + (va - kernstart);
 		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX |
 		    PTE_WIRED | PTE_VALID;
 	}
 
@@ -1397,9 +1401,7 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t
 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
 
-	flags = 0;
-	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
-	flags |= PTE_M;
+	flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
 
 	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
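
For a concrete picture of how bp_trace is meant to be used once the
boot code starts writing markers (per the log, that part is still to
follow), here is a minimal userland C sketch. The marker values and
helper names are hypothetical; in the real code the AP side is PowerPC
assembly in the boot page and bp_trace lives in that page:

/*
 * Sketch of the bp_trace protocol. The BSP zeroes the trace word
 * before releasing the AP; the AP drops a marker at each boot stage;
 * on timeout the BSP reports the last marker it can see.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t bp_trace;	/* in the boot page in reality */

#define BP_TRACE_ENTER		0x01	/* hypothetical: AP reached the boot page */
#define BP_TRACE_TLB1_DONE	0x02	/* hypothetical: AP mapped the kernel */
#define BP_TRACE_BOOTED		0x03	/* hypothetical: AP jumped to the kernel */

/* The AP side: write a marker at each stage (assembly in reality). */
static void
ap_boot_path(void)
{
	bp_trace = BP_TRACE_ENTER;
	/* ... set HIDs, program the TLB1 entry for the kernel ... */
	bp_trace = BP_TRACE_TLB1_DONE;
	/* Simulate a hang here: BP_TRACE_BOOTED is never reached. */
}

/* The BSP side: zero the word, release the AP, report on timeout. */
static int
start_cpu(int cpuid, volatile int *awake)
{
	bp_trace = 0;			/* zeroed before releasing the AP */
	ap_boot_path();			/* stands in for the hold-off release */
	if (!*awake) {
		/* The last marker says how far the AP got before dying. */
		printf("SMP: CPU %d didn't wake up (trace code %#x).\n",
		    cpuid, bp_trace);
		return (1);
	}
	return (0);
}

int
main(void)
{
	int awake = 0;			/* simulate an AP that never woke */

	return (start_cpu(1, &awake));
}

Run as-is, this prints "SMP: CPU 1 didn't wake up (trace code 0x2).",
pointing at the stage after the TLB1 setup, which mirrors the
diagnostic printf added to bare_smp_start_cpu() above.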