Date: Fri, 5 Nov 2004 20:24:38 GMT
From: John Baldwin <jhb@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 64372 for review
Message-ID: <200411052024.iA5KOcLW027544@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=64372

Change 64372 by jhb@jhb_slimer on 2004/11/05 20:23:43

	IFC @64371 (more merges).

Affected files ...

.. //depot/projects/smpng/sys/arm/arm/cpufunc.c#4 integrate
.. //depot/projects/smpng/sys/arm/arm/fusu.S#3 integrate
.. //depot/projects/smpng/sys/arm/arm/identcpu.c#3 integrate
.. //depot/projects/smpng/sys/arm/arm/locore.S#4 integrate
.. //depot/projects/smpng/sys/arm/arm/support.S#5 integrate
.. //depot/projects/smpng/sys/arm/arm/swtch.S#3 integrate
.. //depot/projects/smpng/sys/arm/arm/trap.c#3 integrate
.. //depot/projects/smpng/sys/arm/xscale/i80321/iq31244_machdep.c#2 integrate
.. //depot/projects/smpng/sys/dev/random/randomdev_soft.c#6 integrate
.. //depot/projects/smpng/sys/kern/subr_sleepqueue.c#12 integrate

Differences ...

==== //depot/projects/smpng/sys/arm/arm/cpufunc.c#4 (text+ko) ====

@@ -45,7 +45,7 @@
  * Created      : 30/01/97
  */
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/cpufunc.c,v 1.3 2004/09/23 21:59:43 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/cpufunc.c,v 1.4 2004/11/05 19:48:40 cognet Exp $");
 
 #include <sys/cdefs.h>
 
@@ -973,7 +973,11 @@
 		cpufuncs = arm9_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
 		get_cachetype_cp15();
+#ifdef ARM9_CACHE_WRITE_THROUGH
 		pmap_pte_init_arm9();
+#else
+		pmap_pte_init_generic();
+#endif
 		return 0;
 	}
 #endif /* CPU_ARM9 */

==== //depot/projects/smpng/sys/arm/arm/fusu.S#3 (text+ko) ====

@@ -37,7 +37,7 @@
 #include <machine/asmacros.h>
 #include <machine/armreg.h>
 #include "assym.s"
-__FBSDID("$FreeBSD: src/sys/arm/arm/fusu.S,v 1.2 2004/09/28 14:39:26 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/fusu.S,v 1.3 2004/11/05 19:50:48 cognet Exp $");
 
 #ifdef MULTIPROCESSOR
 .Lcpu_info:
@@ -52,6 +52,56 @@
  *	Fetch an int from the user's address space.
  */
 
+ENTRY(casuptr)
+#ifdef MULTIPROCESSOR
+	/* XXX Probably not appropriate for non-Hydra SMPs */
+	stmfd	sp!, {r0, r14}
+	bl	_C_LABEL(cpu_number)
+	ldr	r2, .Lcpu_info
+	ldr	r2, [r2, r0, lsl #2]
+	ldr	r2, [r2, #CI_CURPCB]
+	ldmfd	sp!, {r0, r14}
+#else
+	ldr	r3, .Lcurpcb
+	ldr	r3, [r3]
+#endif
+
+#ifdef DIAGNOSTIC
+	teq	r3, #0x00000000
+	beq	.Lfusupcbfault
+#endif
+	stmfd	sp!, {r4}
+	adr	r4, .Lfusufault
+	str	r4, [r3, #PCB_ONFAULT]
+	ldmfd	sp!, {r4}
+	ldrt	r3, [r0]
+	cmp	r3, r1
+	movne	r0, r3
+	movne	pc, lr
+	strt	r2, [r0]
+	mov	r0, r1
+#ifdef MULTIPROCESSOR
+	/* XXX Probably not appropriate for non-Hydra SMPs */
+	stmfd	sp!, {r0, r14}
+	bl	_C_LABEL(cpu_number)
+	ldr	r2, .Lcpu_info
+	ldr	r2, [r2, r0, lsl #2]
+	ldr	r2, [r2, #CI_CURPCB]
+	ldmfd	sp!, {r0, r14}
+#else
+	ldr	r3, .Lcurpcb
+	ldr	r3, [r3]
+#endif
+	mov	r1, #0x00000000
+	str	r1, [r3, #PCB_ONFAULT]
+	mov	pc, lr
+
+
+/*
+ * fuword(caddr_t uaddr);
+ *	Fetch an int from the user's address space.
+ */
+
 ENTRY(fuword32)
 ENTRY(fuword)
 #ifdef MULTIPROCESSOR
@@ -254,6 +304,7 @@
  *	Store an int in the user's address space.
  */
 
+ENTRY(suword32)
 ENTRY(suword)
 #ifdef MULTIPROCESSOR
 	/* XXX Probably not appropriate for non-Hydra SMPs */
@@ -282,8 +333,6 @@
 	str	r0, [r2, #PCB_ONFAULT]
 	mov	pc, lr
 
-ENTRY(suword32)
-	adr	pc, _C_LABEL(suword)
 /*
  * suswintr(caddr_t uaddr, short x);
  *	Store a short in the user's address space.
  *	Can be called during an
==== //depot/projects/smpng/sys/arm/arm/identcpu.c#3 (text+ko) ====

@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/identcpu.c,v 1.2 2004/09/23 22:11:43 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/identcpu.c,v 1.3 2004/11/05 19:51:23 cognet Exp $");
 #include <sys/systm.h>
 #include <sys/param.h>
 #include <sys/malloc.h>
@@ -277,7 +277,6 @@
  * The remaining fields in the cpu structure are filled in appropriately.
  */
 
-#if 0
 static const char * const wtnames[] = {
 	"write-through",
 	"write-back",
@@ -296,7 +295,6 @@
 	"**unknown 14**",
 	"**unknown 15**",
 };
-#endif
 
 extern int ctrl;
 void
@@ -365,6 +363,24 @@
 	if (ctrl & CPU_CONTROL_BPRD_ENABLE)
 		printf(" branch prediction enabled");
 
+	/* Print cache info. */
+	if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0)
+		return;
+
+	if (arm_pcache_unified) {
+		printf("%dKB/%dB %d-way %s unified cache\n",
+		    arm_pdcache_size / 1024,
+		    arm_pdcache_line_size, arm_pdcache_ways,
+		    wtnames[arm_pcache_type]);
+	} else {
+		printf("%dKB/%dB %d-way Instruction cache\n",
+		    arm_picache_size / 1024,
+		    arm_picache_line_size, arm_picache_ways);
+		printf("%dKB/%dB %d-way %s Data cache\n",
+		    arm_pdcache_size / 1024,
+		    arm_pdcache_line_size, arm_pdcache_ways,
+		    wtnames[arm_pcache_type]);
+	}
 
 	printf("\n");
 }

==== //depot/projects/smpng/sys/arm/arm/locore.S#4 (text+ko) ====

@@ -37,7 +37,7 @@
 #include <machine/asm.h>
 #include <machine/armreg.h>
 #include <machine/pte.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/locore.S,v 1.4 2004/09/28 14:37:08 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/locore.S,v 1.5 2004/11/05 19:52:55 cognet Exp $");
 
 /* What size should this really be ? It is only used by init_arm() */
 #define INIT_ARM_STACK_SIZE	2048
@@ -157,6 +157,19 @@
 	subs	r2, r2, #4
 	bgt	.L1
 
+	ldr	r4, =KERNVIRTADDR
+	cmp	pc, r4
+#if KERNVIRTADDR > KERNPHYSADDR
+	ldrlt	r4, =KERNVIRTADDR
+	ldrlt	r5, =KERNPHYSADDR
+	sublt	r4, r4, r5
+	addlt	pc, pc, r4
+#else
+	ldrgt	r4, =KERNPHYSADDR
+	ldrgt	r5, =KERNVIRTADDR
+	subgt	r4, r4, r5
+	sublt	pc, pc, r4
+#endif
 	ldr	fp, =KERNVIRTADDR	/* trace back starts here */
 	bl	_C_LABEL(initarm)	/* Off we go */

==== //depot/projects/smpng/sys/arm/arm/support.S#5 (text+ko) ====

@@ -26,14 +26,10 @@
 
 #include <machine/asm.h>
 #include <machine/asmacros.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/support.S,v 1.5 2004/09/23 22:18:56 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/support.S,v 1.6 2004/11/05 19:50:48 cognet Exp $");
 
 #include "assym.s"
 
-ENTRY(casuptr)
-	mov	r1, r2
-	bl	suword
-
 /*
  * memset: Sets a block of memory to the specified value
  *

==== //depot/projects/smpng/sys/arm/arm/swtch.S#3 (text+ko) ====

@@ -78,12 +78,13 @@
  *
  */
 
+#include "assym.s"
+
 #include <machine/asm.h>
 #include <machine/asmacros.h>
 #include <machine/armreg.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/swtch.S,v 1.3 2004/09/28 14:37:39 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/swtch.S,v 1.4 2004/11/05 19:54:13 cognet Exp $");
-#include "assym.s"
 
 /*
  * New experimental definitions of IRQdisable and IRQenable
@@ -118,8 +119,6 @@
 	bic	r14, r14, #(I32_bit | F32_bit)	; \
 	msr	cpsr_c, r14
 
-.Lpcpu:
-	.word	_C_LABEL(__pcpu)
 .Lcurpcb:
 	.word	_C_LABEL(__pcpu) + PC_CURPCB
 .Lcpufuncs:
@@ -130,35 +129,22 @@
 	.word	_C_LABEL(cpu_do_powersave)
 
 ENTRY(cpu_throw)
 	mov	r4, r0
-	ldr	r0, .Lcurthread
 	mov	r5, r1
 
 	/*
-	 * r4 = lwp
-	 * r5 = lwp0
+	 * r4 = oldtd
+	 * r5 = newtd
	 */
 
-	mov	r2, #0x00000000		/* curthread = NULL */
-	str	r2, [r0]
-
-	/*
-	 * We're about to clear both the cache and the TLB.
-	 * Make sure to zap the 'last cache state' pointer since the
-	 * pmap might be about to go away. Also ensure the outgoing
-	 * VM space's cache state is marked as NOT resident in the
-	 * cache, and that lwp0's cache state IS resident.
-	 */
-	ldr	r7, [r4, #(TD_PCB)]	/* r7 = old lwp's PCB */
+	ldr	r7, [r5, #(TD_PCB)]	/* r7 = new thread's PCB */
 
 	/* Switch to lwp0 context */
 
 	ldr	r9, .Lcpufuncs
 	mov	lr, pc
 	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]
-
 	ldr	r0, [r7, #(PCB_PL1VEC)]
 	ldr	r1, [r7, #(PCB_DACR)]
-
 	/*
	 * r0 = Pointer to L1 slot for vector_page (or NULL)
	 * r1 = lwp0's DACR
@@ -201,8 +187,6 @@
 	mov	lr, pc
 	ldr	pc, [r9, #CF_CONTEXT_SWITCH]
 
-	ldr	r0, .Lcurpcb
-
 	/* Restore all the save registers */
 #ifndef __XSCALE__
 	add	r1, r7, #PCB_R8
@@ -215,46 +199,35 @@
 	ldr	r12, [r7, #(PCB_R12)]
 	ldr	r13, [r7, #(PCB_SP)]
 #endif
-	str	r7, [r0]		/* curpcb = lwp0's PCB */
-	mov	r1, #0x00000000		/* r5 = old lwp = NULL */
-	mov	r6, r5
+	mov	r0, #0x00000000		/* r5 = old lwp = NULL */
+	mov	r1, r5
 	b	.Lswitch_resume
 
 ENTRY(cpu_switch)
 	stmfd	sp!, {r4-r7, lr}
-	mov	r6, r1
-	mov	r1, r0
 
 .Lswitch_resume:
-	/* rem: r1 = old lwp */
-	/* rem: r4 = return value [not used if came from cpu_switchto()] */
-	/* rem: r6 = new process */
+	/* rem: r0 = old lwp */
	/* rem: interrupts are disabled */
 
 #ifdef MULTIPROCESSOR
 	/* XXX use curcpu() */
-	ldr	r0, .Lcpu_info_store
-	str	r0, [r6, #(L_CPU)]
+	ldr	r2, .Lcpu_info_store
+	str	r2, [r6, #(L_CPU)]
 #endif
 
 	/* Process is now on a processor. */
 
 	/* We have a new curthread now so make a note it */
 	ldr	r7, .Lcurthread
-	str	r6, [r7]
+	str	r1, [r7]
 
 	/* Hook in a new pcb */
 	ldr	r7, .Lcurpcb
-	ldr	r0, [r6, #(TD_PCB)]
-	str	r0, [r7]
-
-	/* rem: r1 = old lwp */
-	/* rem: r4 = return value */
-	/* rem: r6 = new process */
+	ldr	r2, [r1, #TD_PCB]
+	str	r2, [r7]
 
-	/* Remember the old thread in r0 */
-	mov	r0, r1
 
	/*
	 * If the old lwp on entry to cpu_switch was zero then the
@@ -263,26 +236,24 @@
	 * straight to restoring the context for the new process.
	 */
 	teq	r0, #0x00000000
-	beq	.Lswitch_exited
+	beq	.Lswitch_return
 
-	/* rem: r0 = old lwp */
-	/* rem: r4 = return value */
-	/* rem: r6 = new process */
+	/* rem: r1 = new process */
	/* rem: interrupts are enabled */
 
 	/* Stage two : Save old context */
 
 	/* Get the user structure for the old lwp. */
-	ldr	r1, [r0, #(TD_PCB)]
+	ldr	r2, [r0, #(TD_PCB)]
 
 	/* Save all the registers in the old lwp's pcb */
 #ifndef __XSCALE__
-	add	r7, r1, #(PCB_R8)
+	add	r7, r2, #(PCB_R8)
 	stmia	r7, {r8-r13}
 #else
-	strd	r8, [r1, #(PCB_R8)]
-	strd	r10, [r1, #(PCB_R10)]
-	strd	r12, [r1, #(PCB_R12)]
+	strd	r8, [r2, #(PCB_R8)]
+	strd	r10, [r2, #(PCB_R10)]
+	strd	r12, [r2, #(PCB_R12)]
 #endif
 
	/*
@@ -291,12 +262,13 @@
	 */
 
 	/* Remember the old PCB. */
-	mov	r8, r1
+	mov	r8, r2
 
-	/* r1 now free! */
 
 	/* Get the user structure for the new process in r9 */
-	ldr	r9, [r6, #(TD_PCB)]
+	ldr	r9, [r1, #(TD_PCB)]
+
+	/* r1 now free! */
 
	/*
	 * This can be optimised... We know we want to go from SVC32
@@ -310,9 +282,6 @@
 	str	sp, [r8, #(PCB_UND_SP)]
 
 	msr	cpsr_c, r3		/* Restore the old mode */
-	/* rem: r0 = old lwp */
-	/* rem: r4 = return value */
-	/* rem: r6 = new process */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */
@@ -321,9 +290,6 @@
 
 	/* Third phase : restore saved context */
 
-	/* rem: r0 = old lwp */
-	/* rem: r4 = return value */
-	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */
@@ -342,16 +308,18 @@
 	ldr	r0, [r8, #(PCB_DACR)]		/* r0 = old DACR */
-	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
+	ldr	r5, [r9, #(PCB_DACR)]		/* r1 = new DACR */
 	teq	r10, r11			/* Same L1? */
-	cmpeq	r0, r1				/* Same DACR? */
+	cmpeq	r0, r5				/* Same DACR? */
 	beq	.Lcs_context_switched		/* yes! */
-	ldr	r3, .Lblock_userspace_access
+	ldr	r4, .Lblock_userspace_access
-	mov	r2, #DOMAIN_CLIENT
-	cmp	r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
-	beq	.Lcs_cache_purge_skipped	/* Yup. Don't flush cache */
+	mov	r2, #DOMAIN_CLIENT
+	cmp	r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
+
+	beq	.Lcs_cache_purge_skipped	/* Yup. Don't flush cache */
+
	/*
	 * Definately need to flush the cache.
	 */
@@ -360,17 +328,13 @@
	 * Don't allow user space access between the purge and the switch.
	 */
 	mov	r2, #0x00000001
-	str	r2, [r3]
+	str	r2, [r4]
 
-	stmfd	sp!, {r0-r3}
 	ldr	r1, .Lcpufuncs
 	mov	lr, pc
 	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
-	ldmfd	sp!, {r0-r3}
 
 .Lcs_cache_purge_skipped:
-	/* rem: r1 = new DACR */
-	/* rem: r3 = &block_userspace_access */
-	/* rem: r4 = return value */
+	/* rem: r4 = &block_userspace_access */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
@@ -384,7 +348,7 @@
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
-	str	r2, [r3]
+	str	r2, [r4]
 
	/*
	 * Ensure the vector table is accessible by fixing up the L1
@@ -392,7 +356,7 @@
 	cmp	r7, #0			/* No need to fixup vector table? */
 	ldrne	r2, [r7]		/* But if yes, fetch current value */
 	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
-	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for new context */
+	mcr	p15, 0, r5, c3, c0, 0	/* Update DACR for new context */
 	cmpne	r2, r0			/* Stuffing the same value? */
 #ifndef PMAP_INCLUDE_PTE_SYNC
 	strne	r0, [r7]		/* Nope, update it */
@@ -440,8 +404,6 @@
 
 	/* XXXSCW: Safe to re-enable FIQs here */
 
-	/* rem: r4 = return value */
-	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
 
	/*
@@ -471,8 +433,6 @@
 	ldr	r13, [r7, #(PCB_SP)]
 #endif
 
-	/* rem: r4 = return value */
-	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
 
@@ -482,7 +442,6 @@
 	bl	_C_LABEL(arm_fpe_core_changecontext)
 #endif
 
-	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */
@@ -494,19 +453,6 @@
	 * cpu_switch() was called and return.
	 */
 	ldmfd	sp!, {r4-r7, pc}
-.Lswitch_exited:
-	/*
-	 * We skip the cache purge because cpu_throw() already did it.
-	 * Load up registers the way .Lcs_cache_purge_skipped expects.
-	 * Userspace access already blocked by cpu_throw().
-	 */
-	ldr	r9, [r6, #(TD_PCB)]		/* r9 = new PCB */
-	ldr	r3, .Lblock_userspace_access
-	mrc	p15, 0, r10, c2, c0, 0		/* r10 = old L1 */
-	mov	r5, #0				/* No previous cache state */
-	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
-	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
-	b	.Lcs_cache_purge_skipped
 
 #ifdef DIAGNOSTIC
 .Lswitch_bogons:
 	adr	r0, .Lswitch_panic_str

==== //depot/projects/smpng/sys/arm/arm/trap.c#3 (text+ko) ====

@@ -82,7 +82,7 @@
 #include "opt_ktrace.h"
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/trap.c,v 1.2 2004/09/23 22:22:33 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/trap.c,v 1.3 2004/11/05 19:57:10 cognet Exp $");
 
 #include <sys/types.h>
 
@@ -215,7 +215,6 @@
 #endif /* CPU_ABORT_FIXUP_REQUIRED */
 }
 
-extern int curpid;
 void
 data_abort_handler(trapframe_t *tf)
 {
@@ -229,6 +228,8 @@
 	u_int sticks = 0;
 	int error = 0;
 	struct ksig ksig;
+	struct proc *p;
+
 
 	/* Grab FAR/FSR before enabling interrupts */
 	far = cpu_faultaddress();
@@ -244,13 +245,17 @@
 #endif
 
 	td = curthread;
+	p = td->td_proc;
 
 	/* Data abort came from user mode? */
 	user = TRAP_USERMODE(tf);
 
 	if (user) {
+		sticks = td->td_sticks;
 		td->td_frame = tf;
 		if (td->td_ucred != td->td_proc->p_ucred)
 			cred_update_thread(td);
+		if (td->td_pflags & TDP_SA)
+			thread_user_enter(td);
 	}
 
 	/* Grab the current pcb */
@@ -289,10 +294,6 @@
 		return;
 	}
 
-	if (user) {
-		sticks = td->td_sticks;
-		td->td_frame = tf;
-	}
 	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
@@ -408,6 +409,11 @@
 	onfault = pcb->pcb_onfault;
 	pcb->pcb_onfault = NULL;
+	if (map != kernel_map) {
+		PROC_LOCK(p);
+		p->p_lock++;
+		PROC_UNLOCK(p);
+	}
 	error = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE) ?
	    VM_FAULT_DIRTY : VM_FAULT_NORMAL);
 	pcb->pcb_onfault = onfault;
@@ -415,6 +421,11 @@
 		goto out;
 	}
 
+	if (map != kernel_map) {
+		PROC_LOCK(p);
+		p->p_lock++;
+		PROC_UNLOCK(p);
+	}
 	if (user == 0) {
 		if (pcb->pcb_onfault) {
 			tf->tf_r0 = error;
@@ -518,8 +529,13 @@
 {
 	/* Alignment faults are always fatal if they occur in kernel mode */
-	if (!TRAP_USERMODE(tf))
-		dab_fatal(tf, fsr, far, td, ksig);
+	if (!TRAP_USERMODE(tf)) {
+		if (!td || !td->td_pcb->pcb_onfault)
+			dab_fatal(tf, fsr, far, td, ksig);
+		tf->tf_r0 = EFAULT;
+		tf->tf_pc = (int)td->td_pcb->pcb_onfault;
+		return (0);
+	}
 
 	/* pcb_onfault *must* be NULL at this point */
 
@@ -676,12 +692,14 @@
 prefetch_abort_handler(trapframe_t *tf)
 {
 	struct thread *td;
+	struct proc * p;
 	struct vm_map *map;
 	vm_offset_t fault_pc, va;
 	int error = 0;
 	u_int sticks = 0;
 	struct ksig ksig;
+
 
 #if 0
 	/* Update vmmeter statistics */
 	uvmexp.traps++;
@@ -692,11 +710,14 @@
 #endif
 
 	td = curthread;
+	p = td->td_proc;
 
 	if (TRAP_USERMODE(tf)) {
+		td->td_frame = tf;
 		if (td->td_ucred != td->td_proc->p_ucred)
 			cred_update_thread(td);
-
+		if (td->td_proc->p_flag & P_SA)
+			thread_user_enter(td);
 	}
 	fault_pc = tf->tf_pc;
 	if (td->td_critnest == 0 &&
@@ -721,8 +742,6 @@
 	/* Prefetch aborts cannot happen in kernel mode */
 	if (__predict_false(!TRAP_USERMODE(tf)))
 		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
-	/* Get fault address */
-	td->td_frame = tf;
 
 	sticks = td->td_sticks;
 
@@ -746,8 +765,20 @@
 	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
 		goto out;
 
+	if (map != kernel_map) {
+		PROC_LOCK(p);
+		p->p_lock++;
+		PROC_UNLOCK(p);
+	}
+
 	error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
	    VM_FAULT_NORMAL);
+	if (map != kernel_map) {
+		PROC_LOCK(p);
+		p->p_lock--;
+		PROC_UNLOCK(p);
+	}
+
 	if (__predict_true(error == 0))
 		goto out;
@@ -861,16 +892,14 @@
 	else
 		callp = &p->p_sysent->sv_table[code];
 	nargs = callp->sy_narg & SYF_ARGMASK;
-	if (nargs <= nap)
-		args = ap;
-	else {
-		memcpy(copyargs, ap, nap * sizeof(register_t));
+	memcpy(copyargs, ap, nap * sizeof(register_t));
+	if (nargs > nap) {
 		error = copyin((void *)frame->tf_usr_sp, copyargs + nap,
		    (nargs - nap) * sizeof(register_t));
 		if (error)
 			goto bad;
-		args = copyargs;
 	}
+	args = copyargs;
 	error = 0;
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_SYSCALL))
@@ -927,14 +956,10 @@
 	struct thread *td = curthread;
 	uint32_t insn;
 
-	/*
-	 * Enable interrupts if they were enabled before the exception.
-	 * Since all syscalls *should* come from user mode it will always
-	 * be safe to enable them, but check anyway.
-	 */
+	td->td_frame = frame;
 
-	if (td->td_critnest == 0 && !(frame->tf_spsr & I32_bit))
-		enable_interrupts(I32_bit);
+	if (td->td_proc->p_flag & P_SA)
+		thread_user_enter(td);
 
	/*
	 * Make sure the program counter is correctly aligned so we
	 * don't take an alignment fault trying to read the opcode.
@@ -945,7 +970,14 @@
 		return;
 	}
 	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
-	td->td_frame = frame;
+	/*
+	 * Enable interrupts if they were enabled before the exception.
+	 * Since all syscalls *should* come from user mode it will always
+	 * be safe to enable them, but check anyway.
+	 */
+	if (td->td_critnest == 0 && !(frame->tf_spsr & I32_bit))
+		enable_interrupts(I32_bit);
+
 	syscall(td, frame, insn);
 }

==== //depot/projects/smpng/sys/arm/xscale/i80321/iq31244_machdep.c#2 (text+ko) ====

@@ -48,7 +48,7 @@
 #include "opt_msgbuf.h"
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/arm/xscale/i80321/iq31244_machdep.c,v 1.1 2004/09/23 22:45:36 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/xscale/i80321/iq31244_machdep.c,v 1.2 2004/11/05 19:52:55 cognet Exp $");
 
 #define _ARM32_BUS_DMA_PRIVATE
 #include <sys/param.h>
@@ -116,8 +116,6 @@
 #else
 #define UND_STACK_SIZE	1
 #endif
-#define KERNEL_VM_BASE	(KERNBASE + 0x00c00000)
-#define KERNEL_VM_SIZE	0x05000000
 
 extern u_int data_abort_handler_address;
 extern u_int prefetch_abort_handler_address;
@@ -183,16 +181,7 @@
	    VM_PROT_READ|VM_PROT_WRITE,
	    PTE_NOCACHE,
	},
-#if 0
	{
-	    0x80000000,
-	    0x80000000,
-	    0x08000000,
-	    VM_PROT_READ|VM_PROT_WRITE,
-	    PTE_NOCACHE,
-	},
-#endif
-	{
	    0,
	    0,
	    0,
@@ -202,17 +191,14 @@
 };
 
 #define SDRAM_START 0xa0000000
 
-void DO_corb(void);
 extern vm_offset_t xscale_cache_clean_addr;
 
 void *
 initarm(void *arg, void *arg2)
 {
-	struct pcpu *pc;
 	struct pv_addr kernel_l1pt;
 	struct pv_addr proc0_uarea;
-	struct pv_addr altkern[KERNEL_PT_KERNEL_NUM];
 	int loop;
 	u_int kerneldatasize, symbolsize;
 	u_int l1pagetable;
@@ -224,6 +210,7 @@
 
 	i80321_calibrate_delay();
 	cninit();
+	i = 0;
 	set_cpufuncs();
	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configration
@@ -241,17 +228,17 @@
 	i += 2;
 	fake_preload[i++] = MODINFO_ADDR;
 	fake_preload[i++] = sizeof(vm_offset_t);
-	fake_preload[i++] = KERNBASE;
+	fake_preload[i++] = KERNBASE + 0x00200000;
 	fake_preload[i++] = MODINFO_SIZE;
 	fake_preload[i++] = sizeof(uint32_t);
-	fake_preload[i++] = (uint32_t)&end - KERNBASE;
+	fake_preload[i++] = (uint32_t)&end - KERNBASE - 0x00200000;
 	fake_preload[i++] = 0;
 	fake_preload[i] = 0;
 	preload_metadata = (void *)fake_preload;
 
 	physmem = memsize / PAGE_SIZE;
-	pc = &__pcpu;
-	pcpu_init(pc, 0, sizeof(struct pcpu));
+
+	pcpu_init(pcpup, 0, sizeof(struct pcpu));
 	PCPU_SET(curthread, &thread0);
 
 	physical_start = (vm_offset_t) SDRAM_START;
@@ -279,10 +266,6 @@
		    L2_TABLE_SIZE / PAGE_SIZE);
 	}
 
-	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++) {
-		valloc_pages(altkern[loop], L2_TABLE_SIZE / PAGE_SIZE);
-	}
-
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
@@ -314,7 +297,7 @@
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
-	l1pagetable = kernel_l1pt.pv_pa;
+	l1pagetable = kernel_l1pt.pv_va;
 
 	/* Map the L2 pages tables in the L1 page table */
 
@@ -323,19 +306,15 @@
 	for (i = 0; i < KERNEL_PT_KERNEL_NUM; i++) {
 		pmap_link_l2pt(l1pagetable, KERNBASE + i * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + i]);
-		pmap_link_l2pt(l1pagetable, 0xa0000000 + i * 0x00400000,
-		    &altkern[i]);
 	}
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		pmap_link_l2pt(l1pagetable, KERNBASE + (KERNEL_PT_KERNEL_NUM + loop) * 0x00400000,
+		pmap_link_l2pt(l1pagetable, KERNBASE + (i + loop) * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
 	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
 	pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
	    (((uint32_t)(&end) - KERNBASE - 0x200000) + PAGE_SHIFT) & ~PAGE_SHIFT,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-	pmap_map_chunk(l1pagetable, KERNPHYSADDR, KERNPHYSADDR,
-	    (((uint32_t)(&end) - KERNBASE - 0x200000) + PAGE_SHIFT) & ~PAGE_SHIFT, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	/* Map the stack pages */
 	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@@ -358,11 +337,6 @@
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
 	}
-	for (loop = 0; loop < 4; loop++) {
-		pmap_map_chunk(l1pagetable, altkern[loop].pv_va,
-		    altkern[loop].pv_pa, L2_TABLE_SIZE,
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-	}
 	/* Map the Mini-Data cache clean area. */
 	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

==== //depot/projects/smpng/sys/dev/random/randomdev_soft.c#6 (text+ko) ====

@@ -208,7 +208,8 @@
	 * Command the hash/reseed thread to end and wait for it to finish
	 */
 	random_kthread_control = -1;
-	tsleep((void *)&random_kthread_control, PUSER, "term", 0);
+	tsleep((void *)&random_kthread_control, curthread->td_priority, "term",
+	    0);
 
 	/* Destroy the harvest fifos */
 	while (!STAILQ_EMPTY(&emptyfifo.head)) {
@@ -281,7 +282,8 @@
 
 		/* Found nothing, so don't belabour the issue */
 		if (!active)
-			tsleep(&harvestfifo, PUSER, "-", hz / 10);
+			tsleep(&harvestfifo, curthread->td_priority, "-",
+			    hz / 10);
 
 	}

==== //depot/projects/smpng/sys/kern/subr_sleepqueue.c#12 (text+ko) ====

@@ -62,7 +62,7 @@
 #include "opt_sleepqueue_profiling.h"
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_sleepqueue.c,v 1.13 2004/10/12 18:36:20 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_sleepqueue.c,v 1.14 2004/11/05 20:19:58 jhb Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -252,7 +252,7 @@
 }
 
 /*
- * Places the current thread on the sleepqueue for the specified wait
+ * Places the current thread on the sleep queue for the specified wait
 * channel. If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
@@ -262,7 +262,7 @@
 {
 	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq;
-	struct thread *td, *td1;
+	struct thread *td;
 
 	td = curthread;
 	sc = SC_LOOKUP(wchan);
@@ -299,20 +299,13 @@
 		sq->sq_lock = lock;
 		sq->sq_type = flags & SLEEPQ_TYPE;
 #endif
-		TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
 	} else {
 		MPASS(wchan == sq->sq_wchan);
 		MPASS(lock == sq->sq_lock);
 		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
-		TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
-			if (td1->td_priority > td->td_priority)
-				break;
-		if (td1 != NULL)
-			TAILQ_INSERT_BEFORE(td1, td, td_slpq);
-		else
-			TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
 		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
 	}
+	TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
 	td->td_sleepqueue = NULL;
 	mtx_lock_spin(&sched_lock);
 	td->td_wchan = wchan;
@@ -404,7 +397,7 @@
 
 /*
 * Switches to another thread if we are still asleep on a sleep queue and
- * drop the lock on the sleepqueue chain. Returns with sched_lock held.
+ * drop the lock on the sleep queue chain. Returns with sched_lock held.
 */
 static void
 sleepq_switch(void *wchan)
@@ -675,7 +668,7 @@
 sleepq_signal(void *wchan, int flags, int pri)
 {
 	struct sleepqueue *sq;
-	struct thread *td;
+	struct thread *td, *besttd;
 
 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
@@ -687,11 +680,21 @@
 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
 
-	/* Remove first thread from queue and awaken it. */
-	td = TAILQ_FIRST(&sq->sq_blocked);
-	sleepq_remove_thread(sq, td);
+	/*
+	 * Find the highest priority thread on the queue. If there is a
+	 * tie, use the thread that first appears in the queue as it has
+	 * been sleeping the longest since threads are always added to
+	 * the tail of sleep queues.

>>> TRUNCATED FOR MAIL (1000 lines) <<<
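The mail truncation above cuts the subr_sleepqueue.c hunk right after the comment describing the new wakeup policy: sleepq_add() no longer keeps sq_blocked sorted by priority at insertion time (it now does a plain TAILQ_INSERT_TAIL), and sleepq_signal() instead scans the queue for the highest-priority sleeper, preferring whichever thread was queued first when priorities tie. The committed selection code is not visible here, so the following is only an illustrative, self-contained sketch of that rule under the comment's description; struct fake_td, fake_sq, and pick_besttd() are stand-ins and not kernel identifiers.

/*
 * Illustrative sketch only, not the committed FreeBSD code (truncated above).
 * Lower numeric td_priority means higher priority; using '<' rather than '<='
 * keeps the entry that was queued first when priorities tie, i.e. the thread
 * that has been sleeping the longest.
 */
#include <sys/queue.h>
#include <stdio.h>

struct fake_td {			/* stand-in for struct thread */
	int	td_priority;
	TAILQ_ENTRY(fake_td) td_slpq;
};
TAILQ_HEAD(fake_sq, fake_td);

static struct fake_td *
pick_besttd(struct fake_sq *sq_blocked)
{
	struct fake_td *td, *besttd = NULL;

	TAILQ_FOREACH(td, sq_blocked, td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	return (besttd);	/* caller would then remove and wake this thread */
}

int
main(void)
{
	struct fake_sq q = TAILQ_HEAD_INITIALIZER(q);
	struct fake_td a = { 120 }, b = { 100 }, c = { 100 };

	TAILQ_INSERT_TAIL(&q, &a, td_slpq);	/* queued first, lower priority */
	TAILQ_INSERT_TAIL(&q, &b, td_slpq);	/* queued second, priority 100 */
	TAILQ_INSERT_TAIL(&q, &c, td_slpq);	/* queued third, also priority 100 */
	printf("woke b: %s\n", pick_besttd(&q) == &b ? "yes" : "no");
	return (0);
}

Relative to the removed ordered insert in sleepq_add() (visible earlier in the hunk), this moves the linear scan from every sleep to the wakeup path in sleepq_signal().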
