Date: Sun, 26 Jan 2003 15:39:58 -0800 (PST)
From: Matthew Dillon <dillon@apollo.backplane.com>
To: Jeff Roberson <jroberson@chesapeake.net>, Julian Elischer <julian@elischer.org>, Steve Kargl <sgk@troutmask.apl.washington.edu>, Robert Watson <rwatson@FreeBSD.ORG>, Gary Jennejohn <garyj@jennejohn.org>, <arch@FreeBSD.ORG>
Subject: Re: New scheduler - ULE performance w/ cpu stealing & no pickcpu
Message-ID: <200301262339.h0QNdwK1069471@apollo.backplane.com>
References: <20030126040154.A64928-100000@mail.chesapeake.net> <200301262045.h0QKjsVc067308@apollo.backplane.com>

    Here are the complete test results.  The latest run is with the
    stealcpu code in place and pickcpu ripped out.  The results without
    pickcpu are basically the same as with pickcpu (as expected).

						-Matt

    /usr/bin/time make -j 8 buildworld    (local /usr/src, local /usr/obj)

    4BSD - Original scheduler

        2414.84 real    2648.92 user    758.28 sys
        2399.05 real    2647.84 user    757.78 sys

    ULE -

        3435.42 real    2500.73 user    581.20 sys
        3343.95 real    2501.86 user    581.67 sys

    ULE - STEALCPU/CHOOSE

        2489.76 real    2610.33 user    659.74 sys

    ULE - STEALCPU/CHOOSE, WITHOUT PICKCPU

        2486.76 real    2613.67 user    668.31 sys
        2470.49 real    2611.28 user    665.16 sys
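    (For reference, averaging the 'real' columns above: stock ULE comes
    out roughly 40% slower than 4BSD on this buildworld, while both
    STEALCPU/CHOOSE kernels land within about 3-3.5% of 4BSD, with or
    without pickcpu.)
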
Index: sched_ule.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_ule.c,v
retrieving revision 1.1
diff -u -r1.1 sched_ule.c
--- sched_ule.c    26 Jan 2003 05:23:15 -0000    1.1
+++ sched_ule.c    26 Jan 2003 20:46:22 -0000
@@ -53,6 +53,9 @@
 /* XXX This is bogus compatability crap for ps */
 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
+static int sched_stealcpu = 1;
+SYSCTL_INT(_kern, OID_AUTO, sched_stealcpu, CTLFLAG_RW, &sched_stealcpu, 0,
+    "Ok to steal KSEs from another cpu (0=disabled, 1=normal)");
 
 static void sched_setup(void *dummy);
 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
@@ -181,7 +184,6 @@
 static int sched_slice(struct ksegrp *kg);
 static int sched_priority(struct ksegrp *kg);
 void sched_pctcpu_update(struct kse *ke);
-int sched_pickcpu(void);
 
 static void
 sched_setup(void *dummy)
@@ -279,40 +281,6 @@
         ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
 }
 
-#ifdef SMP
-int
-sched_pickcpu(void)
-{
-        int cpu;
-        int load;
-        int i;
-
-        if (!smp_started)
-                return (0);
-
-        cpu = PCPU_GET(cpuid);
-        load = kseq_cpu[cpu].ksq_load;
-
-        for (i = 0; i < mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
-                if (kseq_cpu[i].ksq_load < load) {
-                        cpu = i;
-                        load = kseq_cpu[i].ksq_load;
-                }
-        }
-
-        CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
-        return (cpu);
-}
-#else
-int
-sched_pickcpu(void)
-{
-        return (0);
-}
-#endif
-
 void
 sched_prio(struct thread *td, u_char prio)
 {
@@ -444,7 +412,6 @@
         child->kg_user_pri = kg->kg_user_pri;
 
         ckse->ke_slice = pkse->ke_slice;
-        ckse->ke_oncpu = sched_pickcpu();
         ckse->ke_runq = NULL;
         /*
          * Claim that we've been running for one second for statistical
@@ -554,9 +521,26 @@
         cpu = PCPU_GET(cpuid);
         kseq = &kseq_cpu[cpu];
 
-        if (runq_check(kseq->ksq_curr) == 0)
-                return (runq_check(kseq->ksq_next));
-        return (1);
+        if (runq_check(kseq->ksq_curr))
+                return(1);
+        if (runq_check(kseq->ksq_next))
+                return(1);
+
+#ifdef SMP
+        /*
+         * Check other cpus for runnable tasks
+         */
+        if (sched_stealcpu) {
+                for (cpu = 0; cpu < mp_ncpus; ++cpu) {
+                        kseq = &kseq_cpu[cpu];
+                        if (runq_check(kseq->ksq_curr))
+                                return(1);
+                        if (runq_check(kseq->ksq_next))
+                                return(1);
+                }
+        }
+#endif
+        return (0);
 }
 
 void
@@ -573,30 +557,53 @@
         }
 }
 
+static __inline
 struct kse *
-sched_choose(void)
+sched_choose_kseq(struct kseq *kseq)
 {
-        struct kseq *kseq;
         struct kse *ke;
         struct runq *swap;
-        int cpu;
 
-        cpu = PCPU_GET(cpuid);
-        kseq = &kseq_cpu[cpu];
-
         if ((ke = runq_choose(kseq->ksq_curr)) == NULL) {
                 swap = kseq->ksq_curr;
                 kseq->ksq_curr = kseq->ksq_next;
                 kseq->ksq_next = swap;
                 ke = runq_choose(kseq->ksq_curr);
         }
 
+        return(ke);
+}
+
+struct kse *
+sched_choose(void)
+{
+        struct kseq *kseq;
+        struct kse *ke;
+        int cpu, i;
+
+        cpu = PCPU_GET(cpuid);
+        kseq = &kseq_cpu[cpu];
+
+        ke = sched_choose_kseq(kseq);
         if (ke) {
                 runq_remove(ke->ke_runq, ke);
                 ke->ke_state = KES_THREAD;
         }
-
-        return (ke);
+#ifdef SMP
+        else if (sched_stealcpu) {
+                for (i = mp_ncpus - 1; ke == NULL && i; --i) {
+                        cpu = (cpu + 1) % mp_ncpus;
+                        kseq = &kseq_cpu[cpu];
+                        ke = sched_choose_kseq(kseq);
+                }
+                if (ke) {
+                        runq_remove(ke->ke_runq, ke);
+                        ke->ke_state = KES_THREAD;
+                }
+        }
+#endif
+        return(ke);
 }
+
 void
 sched_add(struct kse *ke)
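
    (Side note: the patch exposes the stealing behaviour as a read-write
    sysctl, kern.sched_stealcpu, defaulting to 1, so it can be flipped at
    run time for back-to-back comparisons without rebuilding the kernel,
    e.g. "sysctl -w kern.sched_stealcpu=0".  A rough userland sketch using
    sysctlbyname(3) -- assuming a kernel with the diff above applied -- in
    case anyone wants to script such runs:)

        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <stdio.h>
        #include <stdlib.h>

        /*
         * Read kern.sched_stealcpu, or set it when given an argument.
         * (kern.sched_stealcpu is the knob added by the diff above.)
         * Usage: stealcpu [0|1]
         */
        int
        main(int argc, char **argv)
        {
                int oldval, newval;
                size_t oldlen = sizeof(oldval);

                if (argc > 1) {
                        newval = atoi(argv[1]);
                        if (sysctlbyname("kern.sched_stealcpu", &oldval,
                            &oldlen, &newval, sizeof(newval)) == -1) {
                                perror("sysctlbyname");
                                return (1);
                        }
                        printf("kern.sched_stealcpu: %d -> %d\n", oldval,
                            newval);
                } else {
                        if (sysctlbyname("kern.sched_stealcpu", &oldval,
                            &oldlen, NULL, 0) == -1) {
                                perror("sysctlbyname");
                                return (1);
                        }
                        printf("kern.sched_stealcpu: %d\n", oldval);
                }
                return (0);
        }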
To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe freebsd-arch" in the body of the message