Date: Tue, 18 Oct 2011 16:37:28 +0000 (UTC)
From: "Jayachandran C." <jchandra@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r226517 - in head/sys/mips: include mips
Message-ID: <201110181637.p9IGbSJI081629@svn.freebsd.org>
Author: jchandra
Date: Tue Oct 18 16:37:28 2011
New Revision: 226517
URL: http://svn.freebsd.org/changeset/base/226517

Log:
  Fix wakeup latency when sleeping with 'wait'

  If we handle an interrupt just before the 'wait' and the interrupt
  schedules some work, we need to skip the 'wait' call.  The simple
  solution of calling sched_runnable() with interrupts disabled
  immediately before 'wait' still leaves a window after the call and
  before 'wait' in which the same issue can occur.

  The solution implemented is to check the EPC in the interrupt handler,
  and if it is in a region before the 'wait' call, to fix up the EPC to
  skip the wait call.

  Reported/analysed by:   adrian
  Fix suggested by:       kib
  Reviewed by:            jmallett, imp

Modified:
  head/sys/mips/include/md_var.h
  head/sys/mips/mips/exception.S
  head/sys/mips/mips/machdep.c

Modified: head/sys/mips/include/md_var.h
==============================================================================
--- head/sys/mips/include/md_var.h      Tue Oct 18 15:53:48 2011        (r226516)
+++ head/sys/mips/include/md_var.h      Tue Oct 18 16:37:28 2011        (r226517)
@@ -56,6 +56,7 @@ void MipsSwitchFPState(struct thread *,
 u_long  kvtop(void *addr);
 int     is_cacheable_mem(vm_paddr_t addr);
 void    mips_generic_reset(void);
+void    mips_wait(void);
 
 #define MIPS_DEBUG 0
 

Modified: head/sys/mips/mips/exception.S
==============================================================================
--- head/sys/mips/mips/exception.S      Tue Oct 18 15:53:48 2011        (r226516)
+++ head/sys/mips/mips/exception.S      Tue Oct 18 16:37:28 2011        (r226517)
@@ -557,6 +557,33 @@ NNON_LEAF(MipsUserGenException, CALLFRAM
        .set    at
 END(MipsUserGenException)
 
+       .set push
+       .set noat
+NON_LEAF(mips_wait, CALLFRAME_SIZ, ra)
+       PTR_SUBU        sp, sp, CALLFRAME_SIZ
+       .mask   0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
+       REG_S   ra, CALLFRAME_RA(sp)            # save RA
+       mfc0    t0, MIPS_COP_0_STATUS
+       xori    t1, t0, MIPS_SR_INT_IE
+       mtc0    t1, MIPS_COP_0_STATUS
+       COP0_SYNC
+       jal     sched_runnable
+       nop
+       REG_L   ra, CALLFRAME_RA(sp)
+       mfc0    t0, MIPS_COP_0_STATUS
+       ori     t1, t0, MIPS_SR_INT_IE
+       .align 4
+GLOBAL(MipsWaitStart)                          # this is 16 byte aligned
+       mtc0    t1, MIPS_COP_0_STATUS
+       bnez    v0, MipsWaitEnd
+       nop
+       wait
+GLOBAL(MipsWaitEnd)                            # MipsWaitStart + 16
+       jr      ra
+       PTR_ADDU        sp, sp, CALLFRAME_SIZ
+END(mips_wait)
+       .set pop
+
 /*----------------------------------------------------------------------------
  *
  *     MipsKernIntr --
@@ -578,6 +605,19 @@ NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_S
        .set    noat
        PTR_SUBU        sp, sp, KERN_EXC_FRAME_SIZE
        .mask   0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
+
+/*
+ * Check for getting interrupts just before wait
+ */
+       MFC0    k0, MIPS_COP_0_EXC_PC
+       ori     k0, 0xf
+       xori    k0, 0xf                         # 16 byte align
+       PTR_LA  k1, MipsWaitStart
+       bne     k0, k1, 1f
+       nop
+       PTR_ADDU        k1, 16                  # skip over wait
+       MTC0    k1, MIPS_COP_0_EXC_PC
+1:
 /*
  * Save CPU state, building 'frame'.
  */

Modified: head/sys/mips/mips/machdep.c
==============================================================================
--- head/sys/mips/mips/machdep.c        Tue Oct 18 15:53:48 2011        (r226516)
+++ head/sys/mips/mips/machdep.c        Tue Oct 18 16:37:28 2011        (r226517)
@@ -163,6 +163,9 @@ extern char MipsTLBMiss[], MipsTLBMissEn
 /* Cache error handler */
 extern char MipsCache[], MipsCacheEnd[];
 
+/* MIPS wait skip region */
+extern char MipsWaitStart[], MipsWaitEnd[];
+
 extern char edata[], end[];
 #ifdef DDB
 extern vm_offset_t ksym_start, ksym_end;
@@ -327,6 +330,12 @@ void
 mips_vector_init(void)
 {
        /*
+        * Make sure that the Wait region logic is not been
+        * changed
+        */
+       if (MipsWaitEnd - MipsWaitStart != 16)
+               panic("startup: MIPS wait region not correct");
+       /*
         * Copy down exception vector code.
         */
        if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
@@ -485,24 +494,9 @@ spinlock_exit(void)
 /*
  * call platform specific code to halt (until next interrupt) for the idle loop
  */
-/*
- * This is disabled because of three issues:
- *
- * + By calling critical_enter(), any interrupt which occurs after that but
- *   before the wait instruction will be handled but not serviced (in the case
- *   of a netisr) because preemption is not allowed at this point;
- * + Any fast interrupt handler which schedules an immediate or fast callout
- *   will not occur until the wait instruction is interrupted, as the clock
- *   has already been set by cpu_idleclock();
- * + There is currently no known way to atomically enable interrupts and call
- *   wait, which is how the i386/amd64 code gets around (1).  Thus even if
- *   interrupts were disabled and reenabled just before the wait call, any
- *   interrupt that did occur may not interrupt wait.
- */
 void
 cpu_idle(int busy)
 {
-#if 0
        KASSERT((mips_rd_status() & MIPS_SR_INT_IE) != 0,
            ("interrupts disabled in idle process."));
        KASSERT((mips_rd_status() & MIPS_INT_MASK) != 0,
@@ -512,12 +506,11 @@ cpu_idle(int busy)
                critical_enter();
                cpu_idleclock();
        }
-       __asm __volatile ("wait");
+       mips_wait();
        if (!busy) {
                cpu_activeclock();
                critical_exit();
        }
-#endif
 }
 
 int
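For readers tracing the EPC fix-up in MipsKernIntr above, here is a minimal
stand-alone C sketch of the same check, for illustration only; it is not part
of the commit. The function name fixup_epc and the example addresses are made
up, and the real kernel performs this test in assembly on the exception path
using the k0/k1 registers and the MipsWaitStart/MipsWaitEnd labels exported
from exception.S.

    /* Hypothetical user-space model of the wait-region EPC fix-up. */
    #include <stdint.h>
    #include <stdio.h>

    #define WAIT_REGION_SIZE 16     /* MipsWaitEnd - MipsWaitStart */

    static uintptr_t
    fixup_epc(uintptr_t epc, uintptr_t wait_start)
    {
            /* Round EPC down to 16 bytes (the ori/xori 0xf pair in the asm). */
            uintptr_t aligned = epc & ~(uintptr_t)0xf;

            /* Interrupt landed inside the wait region: resume after it. */
            if (aligned == wait_start)
                    return (wait_start + WAIT_REGION_SIZE);
            return (epc);
    }

    int
    main(void)
    {
            uintptr_t wait_start = 0x80100040UL;    /* assumed example address */

            /* Taken between "enable interrupts" and "wait": skips the wait. */
            printf("%#lx\n", (unsigned long)fixup_epc(wait_start + 8, wait_start));
            /* Taken anywhere else: EPC is left alone. */
            printf("%#lx\n", (unsigned long)fixup_epc(wait_start + 0x100, wait_start));
            return (0);
    }

Keeping MipsWaitStart 16-byte aligned and the region exactly 16 bytes long is
what allows a single masked compare against one label rather than a two-sided
range check, which keeps the cost on the kernel interrupt path to a handful of
instructions; the size check added to mips_vector_init() guards that
assumption at boot.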