Date: Wed, 19 Oct 2005 11:17:14 -0400
From: John Baldwin <jhb@freebsd.org>
To: Robert Watson <rwatson@freebsd.org>
Cc: Perforce Change Reviews <perforce@freebsd.org>
Subject: Re: PERFORCE change 85514 for review
Message-ID: <200510191117.15536.jhb@freebsd.org>
In-Reply-To: <20051019122907.E2669@fledge.watson.org>
References: <200510182003.j9IK3qp1002264@repoman.freebsd.org> <20051019122907.E2669@fledge.watson.org>
On Wednesday 19 October 2005 07:30 am, Robert Watson wrote:
> On Tue, 18 Oct 2005, John Baldwin wrote:
> > http://perforce.freebsd.org/chv.cgi?CH=85514
> >
> > Change 85514 by jhb@jhb_slimer on 2005/10/18 20:03:00
> >
> > Rework KDB_STOP_NMI some to make it less KDB-specific.
> > - Use stoppcbs[] on i386 and amd64 always, regardless of KDB_STOP_NMI.
> > - Rename KDB_STOP_NMI to STOP_NMI and have it apply to any IPI_STOP
> >   ipi.
> > - Fix ipi_all(), ipi_all_but_self(), and ipi_self() to correctly
> >   handle bitmapped IPIs and STOP_NMI.
>
> Wouldn't it make sense to simply always use an NMI when entering the
> debugger on SMP?  I guess there are some potential races in simultaneous
> or nearly simultaneous entry of the debugger from multiple CPUs at once,
> but it seems lately I've been bumping into a moderate number of cases
> where the CPUs stack up and wedge during SMP debugger entry, possibly due
> to the CPU entering the debugger holding a spinlock.

I don't disagree, but I think we should always use NMIs to stop CPUs for the
debugger, shutdown, etc., rather than just special-casing KDB.  I wouldn't
mind eventually making STOP_NMI on by default, but for now I was preserving
the status quo from KDB_STOP_NMI.

> Robert N M Watson
>
> > Affected files ...
> >
> > .. //depot/projects/smpng/sys/amd64/amd64/mp_machdep.c#26 edit
> > .. //depot/projects/smpng/sys/amd64/amd64/trap.c#36 edit
> > .. //depot/projects/smpng/sys/amd64/conf/NOTES#22 edit
> > .. //depot/projects/smpng/sys/amd64/include/smp.h#9 edit
> > .. //depot/projects/smpng/sys/conf/options.amd64#14 edit
> > .. //depot/projects/smpng/sys/conf/options.i386#50 edit
> > .. //depot/projects/smpng/sys/conf/options.pc98#49 edit
> > .. //depot/projects/smpng/sys/i386/conf/NOTES#108 edit
> > .. //depot/projects/smpng/sys/i386/i386/mp_machdep.c#91 edit
> > .. //depot/projects/smpng/sys/i386/i386/trap.c#86 edit
> > .. //depot/projects/smpng/sys/i386/include/smp.h#17 edit
> > .. //depot/projects/smpng/sys/kern/subr_kdb.c#12 edit
> > .. //depot/projects/smpng/sys/kern/subr_smp.c#42 edit
> > .. //depot/projects/smpng/sys/notes#61 edit
> > .. //depot/projects/smpng/sys/pc98/conf/NOTES#45 edit
> > .. //depot/projects/smpng/sys/sys/smp.h#15 edit
> >
> > Differences ...
> >
> > ==== //depot/projects/smpng/sys/amd64/amd64/mp_machdep.c#26 (text+ko) ====
> >
> > @@ -28,7 +28,6 @@
> > __FBSDID("$FreeBSD: src/sys/amd64/amd64/mp_machdep.c,v 1.262 2005/08/19 22:10:19 pjd Exp $");
> >
> > #include "opt_cpu.h"
> > -#include "opt_kdb.h"
> > #include "opt_kstack_pages.h"
> > #include "opt_mp_watchdog.h"
> > #include "opt_sched.h"
> > @@ -113,10 +112,30 @@
> >
> > extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
> >
> > +#ifdef STOP_NMI
> > +volatile cpumask_t ipi_nmi_pending;
> > +
> > +static void ipi_nmi_selected(u_int32_t cpus);
> > +#endif
> > +
> > /*
> >  * Local data and functions.
> >  */
> >
> > +#ifdef STOP_NMI
> > +/*
> > + * Provide an alternate method of stopping other CPUs. If another CPU has
> > + * disabled interrupts the conventional STOP IPI will be blocked. This
> > + * NMI-based stop should get through in that case.
> > + */
> > +static int stop_cpus_with_nmi = 1;
> > +SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
> > +    &stop_cpus_with_nmi, 0, "");
> > +TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
> > +#else
> > +#define stop_cpus_with_nmi 0
> > +#endif
> > +
> > static u_int logical_cpus;
> >
> > /* used to hold the AP's until we are ready to release them */
> > @@ -199,11 +218,6 @@
> > smp_topology = &mp_top;
> > }
> >
> > -
> > -#ifdef KDB_STOP_NMI
> > -volatile cpumask_t ipi_nmi_pending;
> > -#endif
> > -
> > /*
> >  * Calculate usable address in base memory for AP trampoline code.
> >  */
> > @@ -945,6 +959,12 @@
> > ipi = IPI_BITMAP_VECTOR;
> > }
> >
> > +#ifdef STOP_NMI
> > + if (ipi == IPI_STOP && stop_cpus_with_nmi) {
> > + ipi_nmi_selected(cpus);
> > + return;
> > + }
> > +#endif
> > CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
> > while ((cpu = ffs(cpus)) != 0) {
> > cpu--;
> > @@ -975,6 +995,10 @@
> > ipi_all(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(all_cpus, ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
> > }
> > @@ -986,6 +1010,10 @@
> > ipi_all_but_self(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(PCPU_GET(other_cpus), ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
> > }
> > @@ -997,11 +1025,15 @@
> > ipi_self(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(PCPU_GET(cpumask), ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
> > }
> >
> > -#ifdef KDB_STOP_NMI
> > +#ifdef STOP_NMI
> > /*
> >  * send NMI IPI to selected CPUs
> >  */
> > @@ -1011,7 +1043,6 @@
> > void
> > ipi_nmi_selected(u_int32_t cpus)
> > {
> > -
> > int cpu;
> > register_t icrlo;
> >
> > @@ -1020,10 +1051,8 @@
> >
> > CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
> >
> > -
> > atomic_set_int(&ipi_nmi_pending, cpus);
> >
> > -
> > while ((cpu = ffs(cpus)) != 0) {
> > cpu--;
> > cpus &= ~(1 << cpu);
> > @@ -1035,43 +1064,43 @@
> > if (!lapic_ipi_wait(BEFORE_SPIN))
> > panic("ipi_nmi_selected: previous IPI has not cleared");
> >
> > - lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
> > + lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
> > }
> > }
> >
> > -
> > int
> > ipi_nmi_handler()
> > {
> > - int cpu = PCPU_GET(cpuid);
> > - void *restartfunc(void);
> > + int cpu = PCPU_GET(cpuid);
> > + int cpumask = PCPU_GET(cpumask);
> > + void (*restartfunc)(void);
> >
> > - if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
> > + if (!(ipi_nmi_pending & cpumask))
> > return 1;
> >
> > - atomic_clear_int(&ipi_nmi_pending,1 << cpu);
> > + atomic_clear_int(&ipi_nmi_pending, cpumask);
> >
> > savectx(&stoppcbs[cpu]);
> >
> > /* Indicate that we are stopped */
> > - atomic_set_int(&stopped_cpus,1 << cpu);
> > -
> > + atomic_set_int(&stopped_cpus, cpumask);
> >
> > /* Wait for restart */
> > - while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
> > + while (!(started_cpus & cpumask))
> > ia32_pause();
> >
> > - atomic_clear_int(&started_cpus,1 << cpu);
> > - atomic_clear_int(&stopped_cpus,1 << cpu);
> > + atomic_clear_int(&started_cpus, cpumask);
> > + atomic_clear_int(&stopped_cpus, cpumask);
> >
> > - restartfunc = (void (*)(void))atomic_readandclear_long(&cpustop_restartfunc);
> > + restartfunc = (void (*)(void))atomic_readandclear_long(
> > +     (u_long *)&cpustop_restartfunc);
> > if (restartfunc != NULL)
> > restartfunc();
> >
> > return 0;
> > }
> >
> > -#endif /* KDB_STOP_NMI */
> > +#endif /* STOP_NMI */
> >
> > /*
> >  * This is called once the rest of the system is up and running and we're
> >
> > ==== //depot/projects/smpng/sys/amd64/amd64/trap.c#36 (text+ko) ====
> >
> > @@ -167,13 +167,13 @@
> > PCPU_LAZY_INC(cnt.v_trap);
> > type = frame.tf_trapno;
> >
> > -#ifdef KDB_STOP_NMI
> > - /* Handler for NMI IPIs used for debugging */
> > +#ifdef STOP_NMI
> > + /* Handler for NMI IPIs used for stopping CPUs. */
> > if (type == T_NMI) {
> > if (ipi_nmi_handler() == 0)
> > goto out;
> > }
> > -#endif /* KDB_STOP_NMI */
> > +#endif /* STOP_NMI */
> >
> > #ifdef KDB
> > if (kdb_active) {
> >
> > ==== //depot/projects/smpng/sys/amd64/conf/NOTES#22 (text+ko) ====
> >
> > @@ -39,7 +39,7 @@
> > #
> > # Debugging options.
> > #
> > -options KDB_STOP_NMI # Stop CPUS using NMI instead of IPI
> > +options STOP_NMI # Stop CPUS using NMI instead of IPI
> >
> >
> >
> >
> > ==== //depot/projects/smpng/sys/amd64/include/smp.h#9 (text+ko) ====
> >
> > @@ -63,9 +63,8 @@
> > void smp_invltlb(void);
> > void smp_masked_invltlb(u_int mask);
> >
> > -#ifdef KDB_STOP_NMI
> > -int ipi_nmi_handler(void);
> > -void ipi_nmi_selected(u_int32_t cpus);
> > +#ifdef STOP_NMI
> > +int ipi_nmi_handler(void);
> > #endif
> >
> > #endif /* !LOCORE */
> >
> > ==== //depot/projects/smpng/sys/conf/options.amd64#14 (text+ko) ====
> >
> > @@ -57,4 +57,4 @@
> > DEV_ATPIC opt_atpic.h
> >
> > # Debugging
> > -KDB_STOP_NMI opt_kdb.h
> > +STOP_NMI opt_cpu.h
> >
> > ==== //depot/projects/smpng/sys/conf/options.i386#50 (text+ko) ====
> >
> > @@ -161,5 +161,5 @@
> > ASR_COMPAT opt_asr.h
> >
> > # Debugging
> > -KDB_STOP_NMI opt_kdb.h
> > +STOP_NMI opt_cpu.h
> > NPX_DEBUG opt_npx.h
> >
> > ==== //depot/projects/smpng/sys/conf/options.pc98#49 (text+ko) ====
> >
> > @@ -103,5 +103,5 @@
> > DEV_NPX opt_npx.h
> >
> > # Debugging
> > -KDB_STOP_NMI opt_kdb.h
> > +STOP_NMI opt_cpu.h
> > NPX_DEBUG opt_npx.h
> >
> > ==== //depot/projects/smpng/sys/i386/conf/NOTES#108 (text+ko) ====
> >
> > @@ -56,7 +56,7 @@
> >
> > # Debugging options.
> > #
> > -options KDB_STOP_NMI # Stop CPUS using NMI instead of IPI
> > +options STOP_NMI # Stop CPUS using NMI instead of IPI
> >
> >
> >
> >
> > ==== //depot/projects/smpng/sys/i386/i386/mp_machdep.c#91 (text+ko) ====
> >
> > @@ -28,7 +28,6 @@
> >
> > #include "opt_apic.h"
> > #include "opt_cpu.h"
> > -#include "opt_kdb.h"
> > #include "opt_kstack_pages.h"
> > #include "opt_mp_watchdog.h"
> > #include "opt_sched.h"
> > @@ -177,8 +176,10 @@
> > vm_offset_t smp_tlb_addr2;
> > volatile int smp_tlb_wait;
> >
> > -#ifdef KDB_STOP_NMI
> > +#ifdef STOP_NMI
> > volatile cpumask_t ipi_nmi_pending;
> > +
> > +static void ipi_nmi_selected(u_int32_t cpus);
> > #endif
> >
> > #ifdef COUNT_IPIS
> > @@ -198,6 +199,20 @@
> >  * Local data and functions.
> >  */
> >
> > +#ifdef STOP_NMI
> > +/*
> > + * Provide an alternate method of stopping other CPUs. If another CPU has
> > + * disabled interrupts the conventional STOP IPI will be blocked. This
> > + * NMI-based stop should get through in that case.
> > + */
> > +static int stop_cpus_with_nmi = 1;
> > +SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
> > +    &stop_cpus_with_nmi, 0, "");
> > +TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
> > +#else
> > +#define stop_cpus_with_nmi 0
> > +#endif
> > +
> > static u_int logical_cpus;
> >
> > /* used to hold the AP's until we are ready to release them */
> > @@ -1186,6 +1201,12 @@
> > ipi = IPI_BITMAP_VECTOR;
> > }
> >
> > +#ifdef STOP_NMI
> > + if (ipi == IPI_STOP && stop_cpus_with_nmi) {
> > + ipi_nmi_selected(cpus);
> > + return;
> > + }
> > +#endif
> > CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
> > while ((cpu = ffs(cpus)) != 0) {
> > cpu--;
> > @@ -1216,6 +1237,10 @@
> > ipi_all(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(all_cpus, ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
> > }
> > @@ -1227,6 +1252,10 @@
> > ipi_all_but_self(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(PCPU_GET(other_cpus), ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
> > }
> > @@ -1238,11 +1267,15 @@
> > ipi_self(u_int ipi)
> > {
> >
> > + if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
> > + ipi_selected(PCPU_GET(cpumask), ipi);
> > + return;
> > + }
> > CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
> > lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
> > }
> >
> > -#ifdef KDB_STOP_NMI
> > +#ifdef STOP_NMI
> > /*
> >  * send NMI IPI to selected CPUs
> >  */
> > @@ -1252,7 +1285,6 @@
> > void
> > ipi_nmi_selected(u_int32_t cpus)
> > {
> > -
> > int cpu;
> > register_t icrlo;
> >
> > @@ -1261,10 +1293,8 @@
> >
> > CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
> >
> > -
> > atomic_set_int(&ipi_nmi_pending, cpus);
> >
> > -
> > while ((cpu = ffs(cpus)) != 0) {
> > cpu--;
> > cpus &= ~(1 << cpu);
> > @@ -1276,43 +1306,43 @@
> > if (!lapic_ipi_wait(BEFORE_SPIN))
> > panic("ipi_nmi_selected: previous IPI has not cleared");
> >
> > - lapic_ipi_raw(icrlo,cpu_apic_ids[cpu]);
> > + lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
> > }
> > }
> >
> > -
> > int
> > ipi_nmi_handler()
> > {
> > - int cpu = PCPU_GET(cpuid);
> > - void *restartfunc(void);
> > + int cpu = PCPU_GET(cpuid);
> > + int cpumask = PCPU_GET(cpumask);
> > + void (*restartfunc)(void);
> >
> > - if(!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
> > + if (!(ipi_nmi_pending & cpumask))
> > return 1;
> >
> > - atomic_clear_int(&ipi_nmi_pending,1 << cpu);
> > + atomic_clear_int(&ipi_nmi_pending, cpumask);
> >
> > savectx(&stoppcbs[cpu]);
> >
> > /* Indicate that we are stopped */
> > - atomic_set_int(&stopped_cpus,1 << cpu);
> > + atomic_set_int(&stopped_cpus, cpumask);
> >
> > -
> > /* Wait for restart */
> > - while(!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
> > + while (!(started_cpus & cpumask))
> > ia32_pause();
> >
> > - atomic_clear_int(&started_cpus,1 << cpu);
> > - atomic_clear_int(&stopped_cpus,1 << cpu);
> > + atomic_clear_int(&started_cpus, cpumask);
> > + atomic_clear_int(&stopped_cpus, cpumask);
> >
> > - restartfunc = (void (*)(void))atomic_readandclear_int(&cpustop_restartfunc);
> > + restartfunc = (void (*)(void))atomic_readandclear_int(
> > +     (u_int *)&cpustop_restartfunc);
> > if (restartfunc != NULL)
> > restartfunc();
> >
> > return 0;
> > }
> >
> > -#endif /* KDB_STOP_NMI */
> > +#endif /* STOP_NMI */
> >
> > /*
> >  * This is called once the rest of the system is up and running and we're
> >
> > ==== //depot/projects/smpng/sys/i386/i386/trap.c#86 (text+ko) ====
> >
> > @@ -185,13 +185,13 @@
> > PCPU_LAZY_INC(cnt.v_trap);
> > type = frame.tf_trapno;
> >
> > -#ifdef KDB_STOP_NMI
> > - /* Handler for NMI IPIs used for debugging */
> > +#ifdef STOP_NMI
> > + /* Handler for NMI IPIs used for stopping CPUs. */
> > if (type == T_NMI) {
> > if (ipi_nmi_handler() == 0)
> > goto out;
> > }
> > -#endif /* KDB_STOP_NMI */
> > +#endif /* STOP_NMI */
> >
> > #ifdef KDB
> > if (kdb_active) {
> >
> > ==== //depot/projects/smpng/sys/i386/include/smp.h#17 (text+ko) ====
> >
> > @@ -79,9 +79,8 @@
> > void smp_invltlb(void);
> > void smp_masked_invltlb(u_int mask);
> >
> > -#ifdef KDB_STOP_NMI
> > -int ipi_nmi_handler(void);
> > -void ipi_nmi_selected(u_int32_t cpus);
> > +#ifdef STOP_NMI
> > +int ipi_nmi_handler(void);
> > #endif
> >
> > #endif /* !LOCORE */
> >
> > ==== //depot/projects/smpng/sys/kern/subr_kdb.c#12 (text+ko) ====
> >
> > @@ -42,16 +42,11 @@
> > #include <machine/kdb.h>
> > #include <machine/pcb.h>
> >
> > -#ifdef KDB_STOP_NMI
> > +#ifdef SMP
> > +#if defined (__i386__) || defined(__amd64__)
> > +#define HAVE_STOPPEDPCBS
> > #include <machine/smp.h>
> > #endif
> > -
> > -/*
> > - * KDB_STOP_NMI requires SMP to pick up the right dependencies
> > - * (And isn't useful on UP anyway)
> > - */
> > -#if defined(KDB_STOP_NMI) && !defined(SMP)
> > -#error "options KDB_STOP_NMI" requires "options SMP"
> > #endif
> >
> > int kdb_active = 0;
> > @@ -91,19 +86,6 @@
> > SYSCTL_INT(_debug_kdb, OID_AUTO, stop_cpus, CTLTYPE_INT | CTLFLAG_RW,
> > &kdb_stop_cpus, 0, "");
> > TUNABLE_INT("debug.kdb.stop_cpus", &kdb_stop_cpus);
> > -
> > -#ifdef KDB_STOP_NMI
> > -/*
> > - * Provide an alternate method of stopping other CPUs. If another CPU has
> > - * disabled interrupts the conventional STOP IPI will be blocked. This
> > - * NMI-based stop should get through in that case.
> > - */
> > -static int kdb_stop_cpus_with_nmi = 1;
> > -SYSCTL_INT(_debug_kdb, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
> > -    &kdb_stop_cpus_with_nmi, 0, "");
> > -TUNABLE_INT("debug.kdb.stop_cpus_with_nmi", &kdb_stop_cpus_with_nmi);
> > -#endif /* KDB_STOP_NMI */
> > -
> > #endif
> >
> > static int
> > @@ -335,27 +317,24 @@
> >
> > struct pcb *
> > kdb_thr_ctx(struct thread *thr)
> > -#ifdef KDB_STOP_NMI
> > {
> > - u_int cpuid;
> > - struct pcpu *pc;
> > +#ifdef HAVE_STOPPEDPCBS
> > + struct pcpu *pc;
> > + u_int cpuid;
> > +#endif
> >
> > - if (thr == curthread)
> > - return &kdb_pcb;
> > + if (thr == curthread)
> > + return &kdb_pcb;
> >
> > - SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> > - cpuid = pc->pc_cpuid;
> > - if (pc->pc_curthread == thr && (atomic_load_acq_int(&stopped_cpus) & (1 << cpuid)))
> > - return &stoppcbs[cpuid];
> > - }
> > -
> > - return thr->td_pcb;
> > -}
> > -#else
> > -{
> > - return ((thr == curthread) ? &kdb_pcb : thr->td_pcb);
> > +#ifdef HAVE_STOPPEDPCBS
> > + SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
> > + cpuid = pc->pc_cpuid;
> > + if (pc->pc_curthread == thr && (stopped_cpus & (1 << cpuid)))
> > + return &stoppcbs[cpuid];
> > + }
> > +#endif
> > + return thr->td_pcb;
> > }
> > -#endif /* KDB_STOP_NMI */
> >
> > struct thread *
> > kdb_thr_first(void)
> > @@ -452,14 +431,7 @@
> >
> > #ifdef SMP
> > if ((did_stop_cpus = kdb_stop_cpus) != 0)
> > - {
> > -#ifdef KDB_STOP_NMI
> > - if(kdb_stop_cpus_with_nmi)
> > - stop_cpus_nmi(PCPU_GET(other_cpus));
> > - else
> > -#endif /* KDB_STOP_NMI */
> > stop_cpus(PCPU_GET(other_cpus));
> > - }
> > #endif
> >
> > kdb_frame = tf;
> >
> > ==== //depot/projects/smpng/sys/kern/subr_smp.c#42 (text+ko) ====
> >
> > @@ -35,8 +35,6 @@
> > #include <sys/cdefs.h>
> > __FBSDID("$FreeBSD: src/sys/kern/subr_smp.c,v 1.196 2005/06/30 03:38:10 peter Exp $");
> >
> > -#include "opt_kdb.h"
> > -
> > #include <sys/param.h>
> > #include <sys/systm.h>
> > #include <sys/kernel.h>
> > @@ -258,36 +256,6 @@
> > return 1;
> > }
> >
> > -#ifdef KDB_STOP_NMI
> > -int
> > -stop_cpus_nmi(cpumask_t map)
> > -{
> > - int i;
> > -
> > - if (!smp_started)
> > - return 0;
> > -
> > - CTR1(KTR_SMP, "stop_cpus(%x)", map);
> > -
> > - /* send the stop IPI to all CPUs in map */
> > - ipi_nmi_selected(map);
> > -
> > - i = 0;
> > - while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
> > - /* spin */
> > - i++;
> > -#ifdef DIAGNOSTIC
> > - if (i == 100000) {
> > - printf("timeout stopping cpus\n");
> > - break;
> > - }
> > -#endif
> > - }
> > -
> > - return 1;
> > -}
> > -#endif /* KDB_STOP_NMI */
> > -
> > /*
> >  * Called by a CPU to restart stopped CPUs.
> >  *
> >
> > ==== //depot/projects/smpng/sys/notes#61 (text+ko) ====
> >
> > @@ -67,6 +67,12 @@
> > do this. That really needs to only be done in spinlock_*() (and maybe
> > some frobbing in intr_execute_handlers()) and needs to happen after the
> > spinlock_*() changes happen.
> > +- Change KDB_STOP_NMI to STOP_NMI and make it non-kdb specific
> > + - i386
> > + + compiles
> > + - amd64
> > + - use stoppcbs[] on i386 and amd64 always
> > + - untested
> > - KTR change: include __func__ along with __FILE__ and __LINE__ and
> > display it after cpu number by default
> > - Cheapen global stats counters via PCPU_LAZY_INC
> >
> > ==== //depot/projects/smpng/sys/pc98/conf/NOTES#45 (text+ko) ====
> >
> > @@ -39,7 +39,7 @@
> >
> > # Debugging options.
> > #
> > -options KDB_STOP_NMI # Stop CPUS using NMI instead of IPI
> > +options STOP_NMI # Stop CPUS using NMI instead of IPI
> >
> >
> >
> >
> > ==== //depot/projects/smpng/sys/sys/smp.h#15 (text+ko) ====
> >
> > @@ -102,9 +102,6 @@
> > void smp_rendezvous_action(void);
> > extern struct mtx smp_ipi_mtx;
> >
> > -#ifdef KDB_STOP_NMI
> > -int stop_cpus_nmi(cpumask_t);
> > -#endif
> > #endif /* SMP */
> > void smp_rendezvous(void (*)(void *),
> > void (*)(void *),

-- 
John Baldwin <jhb@FreeBSD.org>  <><  http://www.FreeBSD.org/~jhb/
"Power Users Use the Power to Serve"  =  http://www.FreeBSD.org
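For readers skimming the patch: the heart of STOP_NMI is the handshake in
ipi_nmi_handler() above.  The interrupted CPU acknowledges its bit in
ipi_nmi_pending, saves its register context into stoppcbs[] for the debugger,
parks itself by setting its bit in stopped_cpus, and then spins until the
stopping CPU sets its bit in started_cpus.  The sketch below is a userland
analogue of that protocol only, not FreeBSD kernel code: threads stand in for
CPUs, a polled flag stands in for NMI delivery itself, and the variable names
merely mirror the kernel's for readability.  It should build with a C11
compiler using "cc -pthread".

/*
 * Userland analogue (NOT kernel code) of the stop/restart handshake that
 * ipi_nmi_handler() implements in the patch above.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static _Atomic unsigned ipi_nmi_pending;  /* CPUs that owe us a stop */
static _Atomic unsigned stopped_cpus;     /* CPUs parked in the handler */
static _Atomic unsigned started_cpus;     /* CPUs released by the controller */

/* What the per-CPU "NMI handler" does: acknowledge, park, wait, resume. */
static void
nmi_stop_handler(int cpu)
{
	unsigned mask = 1u << cpu;

	if (!(atomic_load(&ipi_nmi_pending) & mask))
		return;					/* not for us */
	atomic_fetch_and(&ipi_nmi_pending, ~mask);	/* acknowledge request */

	/* savectx(&stoppcbs[cpu]) would go here in the kernel version. */
	atomic_fetch_or(&stopped_cpus, mask);		/* report that we stopped */
	while (!(atomic_load(&started_cpus) & mask))
		;					/* spin until released */
	atomic_fetch_and(&started_cpus, ~mask);
	atomic_fetch_and(&stopped_cpus, ~mask);
}

static void *
cpu_loop(void *arg)
{
	int cpu = (int)(long)arg;

	/* "Real work"; polling stands in for asynchronous NMI delivery. */
	for (;;)
		nmi_stop_handler(cpu);
	return (NULL);
}

int
main(void)
{
	pthread_t tid[NCPUS];
	unsigned all = (1u << NCPUS) - 1;

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, cpu_loop, (void *)i);

	/* "stop_cpus": post the request, then wait for every CPU to park. */
	atomic_fetch_or(&ipi_nmi_pending, all);
	while ((atomic_load(&stopped_cpus) & all) != all)
		;
	printf("all CPUs stopped\n");

	/* "restart_cpus": release them and wait for the acks to clear. */
	atomic_fetch_or(&started_cpus, all);
	while (atomic_load(&stopped_cpus) != 0)
		;
	printf("all CPUs restarted\n");
	return (0);	/* exiting main tears down the spinning threads */
}

The property the NMI variant preserves is that each target acknowledges in
stopped_cpus before it spins, so the stopping CPU can wait for every bit to
appear even when a target had local interrupts disabled at the time the stop
was requested.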
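The ipi_all(), ipi_all_but_self(), and ipi_self() hunks all add the same
guard: bitmapped IPIs and the NMI-based IPI_STOP cannot be sent with the
local APIC broadcast shorthands, because both need per-destination
bookkeeping (a bit set in each target's pending bitmap, or in
ipi_nmi_pending) before the interrupt is raised, so they are funneled
through ipi_selected() instead.  The fragment below is a hypothetical,
self-contained sketch of that dispatch shape; every name and vector number
in it is invented for illustration and none of it is the kernel's actual API.

/*
 * Hypothetical sketch (invented names, not the kernel's APIs): broadcast
 * shorthands only work for vectors that carry no per-target state, so
 * anything that does is routed through the per-CPU path first.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPUS            4
#define IPI_BITMAP_BASE  0xf0   /* invented vector numbers */
#define IPI_STOP         0xf4

static uint32_t cpu_ipi_bitmap[NCPUS];  /* per-CPU "which logical IPI" bits */
static int stop_with_nmi = 1;

static int
ipi_is_bitmapped(unsigned ipi)
{
	return (ipi >= IPI_BITMAP_BASE && ipi < IPI_STOP);
}

/* Stubs standing in for the hardware delivery primitives. */
static void hw_send_vector(int cpu, unsigned v) { printf("cpu%d <- vector %#x\n", cpu, v); }
static void hw_broadcast_vector(unsigned v)     { printf("all-but-self <- vector %#x\n", v); }
static void send_nmi(int cpu)                   { printf("cpu%d <- NMI\n", cpu); }

static void
ipi_selected(uint32_t cpus, unsigned ipi)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!(cpus & (1u << cpu)))
			continue;
		if (ipi_is_bitmapped(ipi)) {
			/* Record the logical IPI before raising the shared vector. */
			cpu_ipi_bitmap[cpu] |= 1u << (ipi - IPI_BITMAP_BASE);
			hw_send_vector(cpu, IPI_BITMAP_BASE);
		} else if (ipi == IPI_STOP && stop_with_nmi) {
			send_nmi(cpu);	/* pending-mask bookkeeping omitted */
		} else {
			hw_send_vector(cpu, ipi);
		}
	}
}

static void
ipi_all_but_self(uint32_t other_cpus, unsigned ipi)
{
	/* Per-target state?  Then the shorthand cannot be used. */
	if (ipi_is_bitmapped(ipi) || (ipi == IPI_STOP && stop_with_nmi)) {
		ipi_selected(other_cpus, ipi);
		return;
	}
	hw_broadcast_vector(ipi);
}

int
main(void)
{
	ipi_all_but_self(0x0e, IPI_STOP);	/* falls back to per-CPU NMIs */
	ipi_all_but_self(0x0e, 0xe0);		/* plain vector: broadcast is fine */
	return (0);
}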