Date: Sat, 28 Nov 2009 19:37:58 +0000 (UTC)
From: Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject: svn commit: r199891 - in stable/8/sys/powerpc: aim booke include powerpc
Message-ID: <200911281937.nASJbwbl065123@svn.freebsd.org>
Author: nwhitehorn
Date: Sat Nov 28 19:37:58 2009
New Revision: 199891

URL: http://svn.freebsd.org/changeset/base/199891

Log:
  MFC r198212,198378,198427,198428,198723,198724,198725,198731: SMP support
  for PowerPC G5 systems.

  r198724:
  Fix a race in casuword() exposed by csup. casuword() non-atomically read
  the current value of its argument before atomically replacing it, which
  could occasionally return the wrong value on an SMP system. This resulted
  in user mutex operations hanging when using threaded applications.

  r198723,198725,198731:
  Loop on blocked threads when using the ULE scheduler, removing an XXX MP
  comment.

  r198427:
  Add some more paranoia to setting HID registers, and update the AIM clock
  routines to work better with SMP.

  r198378:
  Add SMP support on U3-based G5 systems. While here, correct the 64-bit
  tlbie function to set the CPU to 64-bit mode correctly.

  r198212:
  Don't assume that physical addresses are identity mapped. This allows the
  second processor on G5 systems to start.

Modified:
  stable/8/sys/powerpc/aim/clock.c
  stable/8/sys/powerpc/aim/copyinout.c
  stable/8/sys/powerpc/aim/machdep.c
  stable/8/sys/powerpc/aim/mmu_oea64.c
  stable/8/sys/powerpc/aim/mp_cpudep.c
  stable/8/sys/powerpc/aim/platform_chrp.c
  stable/8/sys/powerpc/aim/swtch.S
  stable/8/sys/powerpc/booke/mp_cpudep.c
  stable/8/sys/powerpc/include/pcpu.h
  stable/8/sys/powerpc/include/smp.h
  stable/8/sys/powerpc/include/spr.h
  stable/8/sys/powerpc/powerpc/cpu.c
  stable/8/sys/powerpc/powerpc/mp_machdep.c

Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/powerpc/aim/clock.c
==============================================================================
--- stable/8/sys/powerpc/aim/clock.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/clock.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -95,8 +95,7 @@ static struct timecounter	decr_timecount
 void
 decr_intr(struct trapframe *frame)
 {
-	long		tick;
-	int		nticks;
+	int32_t		tick, nticks;
 
 	/*
	 * Check whether we are initialized.
@@ -113,12 +112,15 @@ decr_intr(struct trapframe *frame)
 	tick += ticks_per_intr;
 	mtdec(tick);
 
-	if (PCPU_GET(cpuid) == 0) {
-		while (nticks-- > 0)
+	while (nticks-- > 0) {
+		if (PCPU_GET(cpuid) == 0)
 			hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
-	} else {
-		while (nticks-- > 0)
+		else
 			hardclock_cpu(TRAPF_USERMODE(frame));
+
+		statclock(TRAPF_USERMODE(frame));
+		if (profprocs != 0)
+			profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
 	}
 }
 
@@ -145,6 +147,8 @@ decr_init(void)
 	ticks_per_intr = ticks_per_sec / hz;
 	mtdec(ticks_per_intr);
 
+	set_cputicker(mftb, ticks_per_sec, 0);
+
 	mtmsr(msr);
 }
 
Modified: stable/8/sys/powerpc/aim/copyinout.c
==============================================================================
--- stable/8/sys/powerpc/aim/copyinout.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/copyinout.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -347,8 +347,19 @@ casuword(volatile u_long *addr, u_long o
 		return (-1);
 	}
 
-	val = *p;
-	(void) atomic_cmpset_32((volatile uint32_t *)p, old, new);
+	__asm __volatile (
+		"1:\tlwarx %0, 0, %2\n\t"	/* load old value */
+		"cmplw %3, %0\n\t"		/* compare */
+		"bne 2f\n\t"			/* exit if not equal */
+		"stwcx. %4, 0, %2\n\t"		/* attempt to store */
+		"bne- 1b\n\t"			/* spin if failed */
+		"b 3f\n\t"			/* we've succeeded */
+		"2:\n\t"
+		"stwcx. %0, 0, %2\n\t"		/* clear reservation (74xx) */
+		"3:\n\t"
+		: "=&r" (val), "=m" (*p)
+		: "r" (p), "r" (old), "r" (new), "m" (*p)
+		: "cc", "memory");
 
 	td->td_pcb->pcb_onfault = NULL;
 
Modified: stable/8/sys/powerpc/aim/machdep.c
==============================================================================
--- stable/8/sys/powerpc/aim/machdep.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/machdep.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -885,6 +885,8 @@ cpu_initclocks(void)
 {
 
 	decr_tc_init();
+	stathz = hz;
+	profhz = hz;
 }
 
 /*
Modified: stable/8/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/8/sys/powerpc/aim/mmu_oea64.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/mmu_oea64.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -182,35 +182,28 @@ va_to_vsid(pmap_t pm, vm_offset_t va)
  * Just to add to the fun, exceptions must be off as well
  * so that we can't trap in 64-bit mode. What a pain.
  */
+struct mtx	tlbie_mutex;
 
 static __inline void
 TLBIE(pmap_t pmap, vm_offset_t va) {
-	register_t msr;
-	register_t scratch;
-
 	uint64_t vpn;
 	register_t vpn_hi, vpn_lo;
-
-#if 1
-	/*
-	 * CPU documentation says that tlbie takes the VPN, not the
-	 * VA. I think the code below does this correctly. We will see.
-	 */
+	register_t msr;
+	register_t scratch;
 
 	vpn = (uint64_t)(va & ADDR_PIDX);
 	if (pmap != NULL)
 		vpn |= (va_to_vsid(pmap,va) << 28);
-#else
-	vpn = va;
-#endif
 
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
 
+	mtx_lock_spin(&tlbie_mutex);
 	__asm __volatile("\
	    mfmsr %0; \
	    clrldi %1,%0,49; \
-	    insrdi %1,1,1,0; \
+	    mtmsr %1; \
+	    insrdi %1,%5,1,0; \
	    mtmsrd %1; \
	    ptesync; \
	    \
@@ -222,7 +215,8 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
	    eieio; \
	    tlbsync; \
	    ptesync;"
-	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32));
+	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1));
+	mtx_unlock_spin(&tlbie_mutex);
 }
 
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
@@ -352,7 +346,7 @@ static int	moea64_pte_insert(u_int, str
  * PVO calls.
  */
 static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
-		    vm_offset_t, vm_offset_t, uint64_t, int, int);
+		    vm_offset_t, vm_offset_t, uint64_t, int);
 static void	moea64_pvo_remove(struct pvo_entry *, int);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
 static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
@@ -825,6 +819,11 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 	    MTX_RECURSE);
 
 	/*
+	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
+	 */
+	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);
+
+	/*
 	 * Initialise the unmanaged pvo pool.
 	 */
 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
@@ -1259,7 +1258,7 @@ moea64_enter_locked(pmap_t pmap, vm_offs
 		pvo_flags |= PVO_FAKE;
 
 	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
-	    pte_lo, pvo_flags, 0);
+	    pte_lo, pvo_flags);
 
 	if (pmap == kernel_pmap)
 		TLBIE(pmap, va);
@@ -1432,16 +1431,15 @@ moea64_uma_page_alloc(uma_zone_t zone, i
 	if (pvo_allocator_start >= pvo_allocator_end)
 		panic("Ran out of PVO allocator buffer space!");
 
-	/* Now call pvo_enter in recursive mode */
 	moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
-	    PVO_WIRED | PVO_BOOTSTRAP, 1);
+	    PVO_WIRED | PVO_BOOTSTRAP);
 
 	TLBIE(kernel_pmap, va);
-	
+
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
-	
+
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
 		bzero((void *)va, PAGE_SIZE);
 
@@ -1584,7 +1582,7 @@ moea64_kenter(mmu_t mmu, vm_offset_t va,
 	PMAP_LOCK(kernel_pmap);
 	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, pa, pte_lo,
-	    PVO_WIRED | VM_PROT_EXECUTE, 0);
+	    PVO_WIRED | VM_PROT_EXECUTE);
 
 	TLBIE(kernel_pmap, va);
 
@@ -1972,14 +1970,29 @@ static void
 tlbia(void)
 {
 	vm_offset_t i;
+	register_t msr, scratch;
 
-	for (i = 0; i < 0xFF000; i += 0x00001000)
-		TLBIE(NULL,i);
+	for (i = 0; i < 0xFF000; i += 0x00001000) {
+		__asm __volatile("\
+		    mfmsr %0; \
+		    mr %1, %0; \
+		    insrdi %1,%3,1,0; \
+		    mtmsrd %1; \
+		    ptesync; \
+		    \
+		    tlbiel %2; \
+		    \
+		    mtmsrd %0; \
+		    eieio; \
+		    tlbsync; \
+		    ptesync;"
+		    : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
+	}
 }
 
 static int
 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
-    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags, int recurse)
+    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
 {
 	struct pvo_entry *pvo;
 	uint64_t vsid;
@@ -2015,16 +2028,14 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
 	 * there is a mapping.
 	 */
-	if (!recurse)
-		LOCK_TABLE();
+	LOCK_TABLE();
 
 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
 			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
 			    (pte_lo & LPTE_PP)) {
-				if (!recurse)
-					UNLOCK_TABLE();
+				UNLOCK_TABLE();
 				return (0);
 			}
 			moea64_pvo_remove(pvo, -1);
@@ -2045,12 +2056,19 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 		moea64_bpvo_pool_index++;
 		bootstrap = 1;
 	} else {
+		/*
+		 * Note: drop the table around the UMA allocation in
+		 * case the UMA allocator needs to manipulate the page
+		 * table. The mapping we are working with is already
+		 * protected by the PMAP lock.
+		 */
+		UNLOCK_TABLE();
 		pvo = uma_zalloc(zone, M_NOWAIT);
+		LOCK_TABLE();
 	}
 
 	if (pvo == NULL) {
-		if (!recurse)
-			UNLOCK_TABLE();
+		UNLOCK_TABLE();
 		return (ENOMEM);
 	}
@@ -2097,8 +2115,7 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 		moea64_pte_overflow++;
 	}
 
-	if (!recurse)
-		UNLOCK_TABLE();
+	UNLOCK_TABLE();
 
 	return (first ? ENOENT : 0);
 }
 
Modified: stable/8/sys/powerpc/aim/mp_cpudep.c
==============================================================================
--- stable/8/sys/powerpc/aim/mp_cpudep.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/mp_cpudep.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -48,14 +48,34 @@ __FBSDID("$FreeBSD$");
 #include <dev/ofw/openfirm.h>
 #include <machine/ofw_machdep.h>
 
-extern void *rstcode;
-extern register_t l2cr_config;
-extern register_t l3cr_config;
-
 void *ap_pcpu;
 
+static register_t bsp_state[8] __aligned(8);
+
+static void cpudep_save_config(void *dummy);
+SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
+
+uintptr_t
+cpudep_ap_bootstrap(void)
+{
+	register_t msr, sp;
+
+	msr = PSL_KERNSET & ~PSL_EE;
+	mtmsr(msr);
+	isync();
+
+	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
+	powerpc_sync();
+
+	pcpup->pc_curthread = pcpup->pc_idlethread;
+	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
+	sp = pcpup->pc_curpcb->pcb_sp;
+
+	return (sp);
+}
+
 static register_t
-l2_enable(void)
+mpc745x_l2_enable(register_t l2cr_config)
 {
 	register_t ccr;
 
@@ -77,7 +97,7 @@ l2_enable(void)
 }
 
 static register_t
-l3_enable(void)
+mpc745x_l3_enable(register_t l3cr_config)
 {
 	register_t ccr;
 
@@ -109,7 +129,7 @@ l3_enable(void)
 }
 
 static register_t
-l1d_enable(void)
+mpc745x_l1d_enable(void)
 {
 	register_t hid;
 
@@ -127,7 +147,7 @@ l1d_enable(void)
 }
 
 static register_t
-l1i_enable(void)
+mpc745x_l1i_enable(void)
 {
 	register_t hid;
 
@@ -144,43 +164,118 @@ l1i_enable(void)
 	return (hid);
 }
 
-uint32_t
-cpudep_ap_bootstrap(void)
+static void
+cpudep_save_config(void *dummy)
 {
-	uint32_t hid, msr, reg, sp;
-
-	// reg = mfspr(SPR_MSSCR0);
-	// mtspr(SPR_MSSCR0, reg | 0x3);
-
-	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
-	powerpc_sync();
+	uint16_t	vers;
 
-	__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
-	__asm __volatile("mfspr %0,1023" : "=r"(pcpup->pc_pir));
-
-	msr = PSL_FP | PSL_IR | PSL_DR | PSL_ME | PSL_RI;
-	powerpc_sync();
-	isync();
-	mtmsr(msr);
-	isync();
+	vers = mfpvr() >> 16;
 
-	if (l3cr_config != 0)
-		reg = l3_enable();
-	if (l2cr_config != 0)
-		reg = l2_enable();
-	reg = l1d_enable();
-	reg = l1i_enable();
-
-	hid = mfspr(SPR_HID0);
-	hid &= ~(HID0_DOZE | HID0_SLEEP);
-	hid |= HID0_NAP | HID0_DPM;
-	mtspr(SPR_HID0, hid);
-	isync();
-
-	pcpup->pc_curthread = pcpup->pc_idlethread;
-	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
-	sp = pcpup->pc_curpcb->pcb_sp;
+	switch(vers) {
+	case IBM970:
+	case IBM970FX:
+	case IBM970MP:
+		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
+		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
+		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
+		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
+		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
+		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
+		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
+		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
+
+		powerpc_sync();
+
+		break;
+	case MPC7450:
+	case MPC7455:
+	case MPC7457:
+		/* Only MPC745x CPUs have an L3 cache. */
+		bsp_state[3] = mfspr(SPR_L3CR);
+
+		/* Fallthrough */
+	case MPC7400:
+	case MPC7410:
+	case MPC7447A:
+	case MPC7448:
+		bsp_state[2] = mfspr(SPR_L2CR);
+		bsp_state[1] = mfspr(SPR_HID1);
+		bsp_state[0] = mfspr(SPR_HID0);
+		break;
+	}
+}
 
-	return (sp);
+void
+cpudep_ap_setup()
+{
+	register_t	reg;
+	uint16_t	vers;
+
+	vers = mfpvr() >> 16;
+
+	switch(vers) {
+	case IBM970:
+	case IBM970FX:
+	case IBM970MP:
+		/* Set HIOR to 0 */
+		__asm __volatile("mtspr 311,%0" :: "r"(0));
+		powerpc_sync();
+
+		/*
+		 * The 970 has strange rules about how to update HID registers.
+		 * See Table 2-3, 970MP manual
+		 */
+
+		__asm __volatile("mtasr %0; sync" :: "r"(0));
+		__asm __volatile(" \
+			ld	%0,0(%2);				\
+			sync; isync;					\
+			mtspr	%1, %0;					\
+			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1;	\
+			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1; \
+			sync; isync"
+		    : "=r"(reg) : "K"(SPR_HID0), "r"(bsp_state));
+		__asm __volatile("ld %0, 8(%2); sync; isync;	\
+			mtspr %1, %0; mtspr %1, %0; sync; isync"
+		    : "=r"(reg) : "K"(SPR_HID1), "r"(bsp_state));
+		__asm __volatile("ld %0, 16(%2); sync; isync;	\
+			mtspr %1, %0; sync; isync;"
+		    : "=r"(reg) : "K"(SPR_HID4), "r"(bsp_state));
+		__asm __volatile("ld %0, 24(%2); sync; isync;	\
+			mtspr %1, %0; sync; isync;"
+		    : "=r"(reg) : "K"(SPR_HID5), "r"(bsp_state));
+
+		powerpc_sync();
+		break;
+	case MPC7450:
+	case MPC7455:
+	case MPC7457:
+		/* Only MPC745x CPUs have an L3 cache. */
+		reg = mpc745x_l3_enable(bsp_state[3]);
+
+		/* Fallthrough */
+	case MPC7400:
+	case MPC7410:
+	case MPC7447A:
+	case MPC7448:
+		/* XXX: Program the CPU ID into PIR */
+		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
+
+		powerpc_sync();
+		isync();
+
+		mtspr(SPR_HID0, bsp_state[0]); isync();
+		mtspr(SPR_HID1, bsp_state[1]); isync();
+
+		reg = mpc745x_l2_enable(bsp_state[2]);
+		reg = mpc745x_l1d_enable();
+		reg = mpc745x_l1i_enable();
+
+		break;
+	default:
+		printf("WARNING: Unknown CPU type. Cache performace may be "
+		    "suboptimal.\n");
+		break;
+	}
 }
 
Modified: stable/8/sys/powerpc/aim/platform_chrp.c
==============================================================================
--- stable/8/sys/powerpc/aim/platform_chrp.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/platform_chrp.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -35,11 +35,14 @@ __FBSDID("$FreeBSD$");
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/smp.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
 
 #include <machine/bus.h>
 #include <machine/cpu.h>
 #include <machine/hid.h>
 #include <machine/platformvar.h>
+#include <machine/pmap.h>
 #include <machine/smp.h>
 #include <machine/spr.h>
 
@@ -220,6 +223,7 @@ chrp_smp_start_cpu(platform_t plat, stru
 #ifdef SMP
 	phandle_t cpu;
 	volatile uint8_t *rstvec;
+	static volatile uint8_t *rstvec_virtbase = NULL;
 	int res, reset, timeout;
 
 	cpu = pc->pc_hwref;
@@ -229,15 +233,20 @@ chrp_smp_start_cpu(platform_t plat, stru
 
 	ap_pcpu = pc;
 
-	rstvec = (uint8_t *)(0x80000000 + reset);
+	if (rstvec_virtbase == NULL)
+		rstvec_virtbase = pmap_mapdev(0x80000000, PAGE_SIZE);
+
+	rstvec = rstvec_virtbase + reset;
 
 	*rstvec = 4;
+	(void)(*rstvec);
 	powerpc_sync();
 	DELAY(1);
 	*rstvec = 0;
+	(void)(*rstvec);
 	powerpc_sync();
 
-	timeout = 1000;
+	timeout = 10000;
 	while (!pc->pc_awake && timeout--)
 		DELAY(100);
 
Modified: stable/8/sys/powerpc/aim/swtch.S
==============================================================================
--- stable/8/sys/powerpc/aim/swtch.S	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/aim/swtch.S	Sat Nov 28 19:37:58 2009	(r199891)
@@ -57,6 +57,7 @@
  */
 
 #include "assym.s"
+#include "opt_sched.h"
 
 #include <sys/syscall.h>
 
@@ -81,36 +82,36 @@ ENTRY(cpu_throw)
 * Switch to a new thread saving the current state in the old thread.
 */
 ENTRY(cpu_switch)
-	stw	%r5,TD_LOCK(%r3)	/* ULE: update old thread's lock */
-					/* XXX needs to change for MP */
-
-	lwz	%r5,TD_PCB(%r3)		/* Get the old thread's PCB ptr */
+	lwz	%r6,TD_PCB(%r3)		/* Get the old thread's PCB ptr */
 	mr	%r12,%r2
-	stmw	%r12,PCB_CONTEXT(%r5)	/* Save the non-volatile GP regs.
+	stmw	%r12,PCB_CONTEXT(%r6)	/* Save the non-volatile GP regs.
					   These can now be used for scratch */
 
 	mfcr	%r16			/* Save the condition register */
-	stw	%r16,PCB_CR(%r5)
+	stw	%r16,PCB_CR(%r6)
 	mflr	%r16			/* Save the link register */
-	stw	%r16,PCB_LR(%r5)
+	stw	%r16,PCB_LR(%r6)
 	mfsr	%r16,USER_SR		/* Save USER_SR for copyin/out */
 	isync
-	stw	%r16,PCB_AIM_USR(%r5)
-	stw	%r1,PCB_SP(%r5)		/* Save the stack pointer */
+	stw	%r16,PCB_AIM_USR(%r6)
+	stw	%r1,PCB_SP(%r6)		/* Save the stack pointer */
 
 	mr	%r14,%r3		/* Copy the old thread ptr... */
 	mr	%r15,%r4		/* and the new thread ptr in scratch */
+	mr	%r16,%r5		/* and the new lock */
+	mr	%r17,%r6		/* and the PCB */
 
-	lwz	%r6,PCB_FLAGS(%r5)
+	lwz	%r7,PCB_FLAGS(%r17)
 	/* Save FPU context if needed */
-	andi.	%r6, %r6, PCB_FPU
+	andi.	%r7, %r7, PCB_FPU
 	beq	.L1
 	bl	save_fpu
 .L1:
 
-	lwz	%r6,PCB_FLAGS(%r5)
+	mr	%r3,%r14		/* restore old thread ptr */
+	lwz	%r7,PCB_FLAGS(%r17)
 	/* Save Altivec context if needed */
-	andi.	%r6, %r6, PCB_VEC
+	andi.	%r7, %r7, PCB_VEC
 	beq	.L2
 	bl	save_vec
 
@@ -118,7 +119,19 @@ ENTRY(cpu_switch)
 	mr	%r3,%r14		/* restore old thread ptr */
 	bl	pmap_deactivate		/* Deactivate the current pmap */
 
+	stw	%r16,TD_LOCK(%r14)	/* ULE: update old thread's lock */
+
 cpu_switchin:
+#if defined(SMP) && defined(SCHED_ULE)
+	/* Wait for the new thread to become unblocked */
+	lis	%r6,blocked_lock@ha
+	addi	%r6,%r6,blocked_lock@l
+blocked_loop:
+	lwz	%r7,TD_LOCK(%r15)
+	cmpw	%r6,%r7
+	beq	blocked_loop
+#endif
+
 	mfsprg	%r7,0			/* Get the pcpu pointer */
 	stw	%r15,PC_CURTHREAD(%r7)	/* Store new current thread */
 	lwz	%r17,TD_PCB(%r15)	/* Store new current PCB */
 
Modified: stable/8/sys/powerpc/booke/mp_cpudep.c
==============================================================================
--- stable/8/sys/powerpc/booke/mp_cpudep.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/booke/mp_cpudep.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -47,7 +47,7 @@ extern void icache_inval(void);
 
 volatile void *ap_pcpu;
 
-uint32_t
+uintptr_t
 cpudep_ap_bootstrap()
 {
 	uint32_t msr, sp, csr;
@@ -78,3 +78,8 @@ cpudep_ap_bootstrap()
 
 	return (sp);
 }
+
+void
+cpudep_ap_setup()
+{
+}
 
Modified: stable/8/sys/powerpc/include/pcpu.h
==============================================================================
--- stable/8/sys/powerpc/include/pcpu.h	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/include/pcpu.h	Sat Nov 28 19:37:58 2009	(r199891)
@@ -43,8 +43,8 @@ struct pmap;
 	struct thread	*pc_vecthread;		/* current vec user */	\
 	uintptr_t	pc_hwref;					\
 	uint32_t	pc_pir;						\
-	int		pc_bsp:1;					\
-	int		pc_awake:1;					\
+	int		pc_bsp;						\
+	volatile int	pc_awake;					\
 	uint32_t	pc_ipimask;					\
 	register_t	pc_tempsave[CPUSAVE_LEN];			\
 	register_t	pc_disisave[CPUSAVE_LEN];			\
 
Modified: stable/8/sys/powerpc/include/smp.h
==============================================================================
--- stable/8/sys/powerpc/include/smp.h	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/include/smp.h	Sat Nov 28 19:37:58 2009	(r199891)
@@ -48,7 +48,8 @@ struct cpuref {
 };
 
 void	pmap_cpu_bootstrap(int);
-uint32_t cpudep_ap_bootstrap(void);
+uintptr_t cpudep_ap_bootstrap(void);
+void	cpudep_ap_setup(void);
 void	machdep_ap_bootstrap(void);
 
 #endif /* !LOCORE */
 
Modified: stable/8/sys/powerpc/include/spr.h
==============================================================================
--- stable/8/sys/powerpc/include/spr.h	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/include/spr.h	Sat Nov 28 19:37:58 2009	(r199891)
@@ -50,7 +50,7 @@
 #define	mtspr64(reg,valhi,vallo,scratch)				\
	__asm __volatile("						\
		mfmsr %0;						\
-		insrdi %0,1,1,0;					\
+		insrdi %0,%5,1,0;					\
		mtmsrd %0;						\
		isync;							\
									\
@@ -62,13 +62,13 @@
		clrldi %0,%0,1;						\
		mtmsrd %0;						\
		isync;"							\
-	    : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32))
+	    : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32), "r"(1))
 
 #define	mfspr64upper(reg,scratch)					\
	( { register_t val;						\
	    __asm __volatile("						\
		mfmsr %0;						\
-		insrdi %0,1,1,0;					\
+		insrdi %0,%4,1,0;					\
		mtmsrd %0;						\
		isync;							\
									\
@@ -78,7 +78,7 @@
		clrldi %0,%0,1;						\
		mtmsrd %0;						\
		isync;"							\
-	    : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32));		\
+	    : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1));	\
	    val; } )
 
 #endif /* _LOCORE */
 
Modified: stable/8/sys/powerpc/powerpc/cpu.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/cpu.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/powerpc/cpu.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -69,6 +69,7 @@
 #include <machine/bus.h>
 #include <machine/hid.h>
 #include <machine/md_var.h>
+#include <machine/smp.h>
 #include <machine/spr.h>
 
 int powerpc_pow_enabled;
@@ -112,9 +113,6 @@ static const struct cputab models[] = {
 static char model[64];
 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, "");
 
-register_t	l2cr_config = 0;
-register_t	l3cr_config = 0;
-
 static void	cpu_print_speed(void);
 static void	cpu_print_cacheinfo(u_int, uint16_t);
 
@@ -258,11 +256,6 @@ cpu_setup(u_int cpuid)
 	case MPC7450:
 	case MPC7455:
 	case MPC7457:
-		/* Only MPC745x CPUs have an L3 cache. */
-		l3cr_config = mfspr(SPR_L3CR);
-
-		/* Fallthrough */
 	case MPC750:
 	case IBM750FX:
 	case MPC7400:
@@ -272,8 +265,6 @@ cpu_setup(u_int cpuid)
 		cpu_print_speed();
 		printf("\n");
 
-		l2cr_config = mfspr(SPR_L2CR);
-
 		if (bootverbose)
 			cpu_print_cacheinfo(cpuid, vers);
 		break;
@@ -366,15 +357,15 @@ cpu_print_cacheinfo(u_int cpuid, uint16_
 	printf("L1 D-cache %sabled\n", (hid & HID0_DCE) ? "en" : "dis");
 
 	printf("cpu%u: ", cpuid);
-	if (l2cr_config & L2CR_L2E) {
+	if (mfspr(SPR_L2CR) & L2CR_L2E) {
 		switch (vers) {
 		case MPC7450:
 		case MPC7455:
 		case MPC7457:
 			printf("256KB L2 cache, ");
-			if (l3cr_config & L3CR_L3E)
+			if (mfspr(SPR_L3CR) & L3CR_L3E)
 				printf("%cMB L3 backside cache",
-				    l3cr_config & L3CR_L3SIZ ? '2' : '1');
+				    mfspr(SPR_L3CR) & L3CR_L3SIZ ? '2' : '1');
 			else
 				printf("L3 cache disabled");
 			printf("\n");
@@ -383,7 +374,7 @@ cpu_print_cacheinfo(u_int cpuid, uint16_
 			printf("512KB L2 cache\n");
 			break;
 		default:
-			switch (l2cr_config & L2CR_L2SIZ) {
+			switch (mfspr(SPR_L2CR) & L2CR_L2SIZ) {
 			case L2SIZ_256K:
 				printf("256KB ");
 				break;
@@ -394,9 +385,9 @@ cpu_print_cacheinfo(u_int cpuid, uint16_
 				printf("1MB ");
 				break;
 			}
-			printf("write-%s", (l2cr_config & L2CR_L2WT)
+			printf("write-%s", (mfspr(SPR_L2CR) & L2CR_L2WT)
 			    ? "through" : "back");
-			if (l2cr_config & L2CR_L2PE)
+			if (mfspr(SPR_L2CR) & L2CR_L2PE)
 				printf(", with parity");
 			printf(" backside cache\n");
 			break;
 
Modified: stable/8/sys/powerpc/powerpc/mp_machdep.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/mp_machdep.c	Sat Nov 28 18:36:58 2009	(r199890)
+++ stable/8/sys/powerpc/powerpc/mp_machdep.c	Sat Nov 28 19:37:58 2009	(r199891)
@@ -64,7 +64,10 @@ static u_int ipi_msg_cnt[32];
 void
 machdep_ap_bootstrap(void)
 {
+	/* Set up important bits on the CPU (HID registers, etc.) */
+	cpudep_ap_setup();
 
+	/* Set PIR */
 	PCPU_SET(pir, mfspr(SPR_PIR));
 	PCPU_SET(awake, 1);
 	__asm __volatile("msync; isync");
@@ -78,7 +81,7 @@ machdep_ap_bootstrap(void)
 	__asm __volatile("mtdec %0" :: "r"(ap_decr));
 
 	atomic_add_int(&ap_awake, 1);
-	CTR1(KTR_SMP, "SMP: AP CPU%d launched", PCPU_GET(cpuid));
+	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
 
 	/* Initialize curthread */
 	PCPU_SET(curthread, PCPU_GET(idlethread));
@@ -86,6 +89,8 @@ machdep_ap_bootstrap(void)
 
 	/* Let the DEC and external interrupts go */
 	mtmsr(mfmsr() | PSL_EE);
+
+	/* Announce ourselves awake, and enter the scheduler */
 	sched_throw(NULL);
 }
 
@@ -247,6 +252,9 @@ cpu_mp_unleash(void *dummy)
 		    mp_ncpus, cpus, smp_cpus);
 	}
 
+	/* Let the APs get into the scheduler */
+	DELAY(10000);
+
 	smp_active = 1;
 	smp_started = 1;
 }
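
A note for readers following the r198724 change in copyinout.c: the old code
performed a plain load of *p and then a separate atomic_cmpset_32(), so the
value it returned was not necessarily the value the compare-and-swap actually
observed. The sketch below models the difference in portable C; it is
illustrative only (the function names are hypothetical, and GCC atomic
builtins stand in for the kernel's lwarx/stwcx. loop).

	#include <stdint.h>

	/*
	 * Racy shape (pre-r198724): the plain load and the atomic op can
	 * see different values if another CPU writes *p between them, so
	 * a stale 'val' may be returned even though the CAS itself
	 * behaved correctly.
	 */
	uint32_t
	cas_racy(volatile uint32_t *p, uint32_t old, uint32_t new)
	{
		uint32_t val;

		val = *p;		/* non-atomic read */
		__sync_bool_compare_and_swap(p, old, new);
		return (val);		/* may differ from what CAS saw */
	}

	/*
	 * Fixed shape (what the new lwarx/stwcx. loop achieves): return
	 * the value that the atomic operation itself observed.
	 */
	uint32_t
	cas_fixed(volatile uint32_t *p, uint32_t old, uint32_t new)
	{
		uint32_t val = old;

		__atomic_compare_exchange_n(p, &val, new, 0,
		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return (val);		/* value the CAS actually read */
	}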
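
The blocked_loop added to swtch.S is the ULE thread-lock handoff that the
removed "XXX needs to change for MP" comment referred to: cpu_switch can be
handed a thread whose context is still being saved on another CPU, marked by
td_lock pointing at blocked_lock, and must spin until the other CPU releases
it. A minimal C model of that wait, with stand-in types rather than the real
kernel structures:

	struct mtx { int mtx_lock; };	/* stand-in for the kernel type */
	struct thread { struct mtx *volatile td_lock; };

	struct mtx blocked_lock;	/* models the kernel's blocked_lock */

	static void
	wait_for_unblock(struct thread *td)
	{
		/*
		 * The CPU switching away from 'td' points td_lock at
		 * blocked_lock while it saves the register context, then
		 * stores the real lock pointer (the stw to TD_LOCK above).
		 * Spin until that store becomes visible.
		 */
		while (td->td_lock == &blocked_lock)
			;		/* lwz/cmpw/beq in the assembly */
	}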
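
Similarly, the tlbie_mutex introduced in mmu_oea64.c enforces the rule stated
in the new bootstrap comment: the tlbie/tlbsync sequence may only be executed
by one processor at a time. A self-contained sketch of that locking pattern
(the names below are stand-ins, not the kernel's API):

	typedef volatile int spinlock_t; /* stand-in for struct mtx (MTX_SPIN) */

	static spinlock_t tlbie_lock;	/* models tlbie_mutex */

	static void
	spin_lock(spinlock_t *l)	/* models mtx_lock_spin() */
	{
		while (__sync_lock_test_and_set(l, 1))
			;		/* spin until the lock is free */
	}

	static void
	spin_unlock(spinlock_t *l)	/* models mtx_unlock_spin() */
	{
		__sync_lock_release(l);
	}

	static void
	tlb_invalidate(unsigned long vpn)
	{
		spin_lock(&tlbie_lock);	/* one CPU in the sequence at a time */
		/* tlbie; eieio; tlbsync; ptesync -- the inline asm in TLBIE() */
		(void)vpn;
		spin_unlock(&tlbie_lock);
	}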