Date: Wed, 26 Mar 2008 18:16:43 GMT
From: Peter Wemm <peter@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 138638 for review
Message-ID: <200803261816.m2QIGhXq074642@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=138638

Change 138638 by peter@peter_overcee on 2008/03/26 18:16:03

	IFC @138629

Affected files ...

.. //depot/projects/bike_sched/sys/kern/sched_ule.c#23 integrate

Differences ...

==== //depot/projects/bike_sched/sys/kern/sched_ule.c#23 (text+ko) ====

@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/sched_ule.c,v 1.234 2008/03/16 10:58:05 rwatson Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/sched_ule.c,v 1.238 2008/03/20 05:51:16 jeff Exp $");
 
 #include "opt_hwpmc_hooks.h"
 #include "opt_sched.h"
@@ -260,7 +260,7 @@
 static void sched_pctcpu_update(struct thread *);
 
 /* Operations on per processor queues */
-static struct thread * tdq_choose(struct tdq *);
+static struct thread *tdq_choose(struct tdq *);
 static void tdq_setup(struct tdq *);
 static void tdq_load_add(struct tdq *, struct thread *);
 static void tdq_load_rem(struct tdq *, struct thread *);
@@ -275,7 +275,7 @@
 static int tdq_idled(struct tdq *);
 static void tdq_notify(struct tdq *, struct thread *);
 static struct thread *tdq_steal(struct tdq *, int);
-static struct thread *runq_steal(struct runq *);
+static struct thread *runq_steal(struct runq *, int);
 static int sched_pickcpu(struct thread *, int);
 static void sched_balance(void);
 static int sched_balance_pair(struct tdq *, struct tdq *);
@@ -311,9 +311,10 @@
 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
 				pri = j + (i << RQB_L2BPW);
 				rqh = &rq->rq_queues[pri];
-				TAILQ_FOREACH(td, rqh, td_procq) {
+				TAILQ_FOREACH(td, rqh, td_runq) {
 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
-					    td, td->td_name, td->td_priority, td->td_rqindex, pri);
+					    td, td->td_name, td->td_priority,
+					    td->td_rqindex, pri);
 				}
 			}
 		}
@@ -387,18 +388,19 @@
 static __inline void
 tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 {
+	struct td_sched *ts;
 	u_char pri;
-	struct td_sched *ts = TD_TO_TS(td);
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
-	TD_SET_RUNQ(ts->ts_thread);
+	pri = td->td_priority;
+	ts = TD_TO_TS(td);
+	TD_SET_RUNQ(td);
 	if (THREAD_CAN_MIGRATE(td)) {
 		tdq->tdq_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
 	}
-	pri = td->td_priority;
 	if (pri <= PRI_MAX_REALTIME) {
 		ts->ts_runq = &tdq->tdq_realtime;
 	} else if (pri <= PRI_MAX_TIMESHARE) {
@@ -422,11 +424,11 @@
 			pri = (unsigned char)(pri - 1) % RQ_NQS;
 		} else
 			pri = tdq->tdq_ridx;
-		runq_add_pri(ts->ts_runq, ts, pri, flags);
+		runq_add_pri(ts->ts_runq, td, pri, flags);
 		return;
 	} else
 		ts->ts_runq = &tdq->tdq_idle;
-	runq_add(ts->ts_runq, ts, flags);
+	runq_add(ts->ts_runq, td, flags);
 }
 
 /*
@@ -437,9 +439,10 @@
 static __inline void
 tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
-	struct td_sched *ts = TD_TO_TS(td);
+	struct td_sched *ts;
+
+	ts = TD_TO_TS(td);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-
 	KASSERT(ts->ts_runq != NULL, ("tdq_runq_remove: thread %p null ts_runq", td));
 	if (ts->ts_flags & TSF_XFERABLE) {
 		tdq->tdq_transferable--;
@@ -451,7 +454,7 @@
 		else
 			runq_remove_idx(ts->ts_runq, td, NULL);
 	} else
-		runq_remove(ts->ts_runq, ts);
+		runq_remove(ts->ts_runq, td);
 }
 
 /*
@@ -461,15 +464,16 @@
 static void
 tdq_load_add(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts;
 	int class;
 
+	ts = td->td_sched;
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
 	class = PRI_BASE(td->td_pri_class);
 	tdq->tdq_load++;
 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
-	if (class != PRI_ITHD &&
-	    (td->td_proc->p_flag & P_NOLOAD) == 0)
+	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload++;
 }
 
@@ -480,19 +484,20 @@
 static void
 tdq_load_rem(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts;
 	int class;
 
+	ts = TD_TO_TS(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	class = PRI_BASE(td->td_pri_class);
-	if (class != PRI_ITHD &&
-	    (td->td_proc->p_flag & P_NOLOAD) == 0)
+	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload--;
 	KASSERT(tdq->tdq_load != 0,
 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
 	tdq->tdq_load--;
 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
-	TD_TO_TS(td)->ts_runq = NULL;
+	ts->ts_runq = NULL;
}
 
 /*
@@ -502,16 +507,13 @@
 static void
 tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
 {
-	struct td_sched *ts;
 	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	if (ctd == NULL)
 		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
-	ts = tdq_choose(tdq);
-	if (ts)
-		td = ts->ts_thread;
-	if (ts == NULL || td->td_priority > ctd->td_priority)
+	td = tdq_choose(tdq);
+	if (td == NULL || td->td_priority > ctd->td_priority)
 		tdq->tdq_lowpri = ctd->td_priority;
 	else
 		tdq->tdq_lowpri = td->td_priority;
@@ -843,7 +845,6 @@
 tdq_move(struct tdq *from, struct tdq *to)
 {
 	struct thread *td;
-	struct thread *td;
 	struct tdq *tdq;
 	int cpu;
 
@@ -938,6 +939,8 @@
 
 	if (tdq->tdq_ipipending)
 		return;
+	cpu = TD_TO_TS(td)->ts_cpu;
+	pri = td->td_priority;
 	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
 	if (!sched_shouldpreempt(pri, cpri, 1))
 		return;
@@ -952,9 +955,9 @@
 static struct thread *
 runq_steal_from(struct runq *rq, int cpu, u_char start)
 {
-	struct thread *td;
 	struct rqbits *rqb;
 	struct rqhead *rqh;
+	struct thread *td;
 	int first;
 	int bit;
 	int pri;
@@ -978,7 +981,7 @@
 		pri = RQB_FFS(rqb->rqb_bits[i]);
 		pri += (i << RQB_L2BPW);
 		rqh = &rq->rq_queues[pri];
-		TAILQ_FOREACH(td, rqh, td_procq) {
+		TAILQ_FOREACH(td, rqh, td_runq) {
 			if (first && THREAD_CAN_MIGRATE(td) &&
 			    THREAD_CAN_SCHED(td, cpu))
 				return (td);
@@ -1013,7 +1016,7 @@
 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
 				continue;
 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
-			TAILQ_FOREACH(td, rqh, td_procq)
+			TAILQ_FOREACH(td, rqh, td_runq)
 				if (THREAD_CAN_MIGRATE(td) &&
 				    THREAD_CAN_SCHED(td, cpu))
 					return (td);
@@ -1028,13 +1031,13 @@
 static struct thread *
 tdq_steal(struct tdq *tdq, int cpu)
 {
-	struct td_sched *td;
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
 		return (td);
-	if ((td = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
-	    != NULL)
+	if ((td = runq_steal_from(&tdq->tdq_timeshare,
+	    cpu, tdq->tdq_ridx)) != NULL)
 		return (td);
 	return (runq_steal(&tdq->tdq_idle, cpu));
 }
@@ -1046,16 +1049,17 @@
 static inline struct tdq *
 sched_setcpu(struct thread *td, int cpu, int flags)
 {
-	struct thread *td;
+	struct tdq *tdq;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-
 	tdq = TDQ_CPU(cpu);
+	td->td_sched->ts_cpu = cpu;
 	ts = TD_TO_TS(td);
 	ts->ts_cpu = cpu;
-
-	/* If the lock matches just return the queue. */
+	/*
+	 * If the lock matches just return the queue.
+	 */
 	if (td->td_lock == TDQ_LOCKPTR(tdq))
 		return (tdq);
 #ifdef notyet
@@ -1149,6 +1153,7 @@
 static struct thread *
 tdq_choose(struct tdq *tdq)
 {
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	td = runq_choose(&tdq->tdq_realtime);
@@ -1161,7 +1166,6 @@
 		    td->td_priority));
 		return (td);
 	}
-
 	td = runq_choose(&tdq->tdq_idle);
 	if (td != NULL) {
 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
@@ -1239,7 +1243,7 @@
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
-	tdq_load_add(tdq, TD_TO_TS(&td_sched0));
+	tdq_load_add(tdq, &thread0);
 	tdq->tdq_lowpri = thread0.td_priority;
 	TDQ_UNLOCK(tdq);
 }
@@ -1521,28 +1525,33 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
-
+	/*
+	 * If the priority has been elevated due to priority
+	 * propagation, we may have to move ourselves to a new
+	 * queue.  This could be optimized to not re-add in some
+	 * cases.
+	 */
 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
-		/*
-		 * If the priority has been elevated due to priority
-		 * propagation, we may have to move ourselves to a new
-		 * queue.  This could be optimized to not re-add in some
-		 * cases.
-		 */
 		sched_rem(td);
 		td->td_priority = prio;
 		sched_add(td, SRQ_BORROWING);
 		return;
 	}
-	tdq = TDQ_CPU(ts->ts_cpu);
-	oldpri = td->td_priority;
-	td->td_priority = prio;
+	/*
+	 * If the thread is currently running we may have to adjust the lowpri
+	 * information so other cpus are aware of our current priority.
+	 */
 	if (TD_IS_RUNNING(td)) {
+		tdq = TDQ_CPU(ts->ts_cpu);
+		oldpri = td->td_priority;
+		td->td_priority = prio;
 		if (prio < tdq->tdq_lowpri)
 			tdq->tdq_lowpri = prio;
 		else if (tdq->tdq_lowpri == oldpri)
 			tdq_setlowpri(tdq, td);
+		return;
 	}
+	td->td_priority = prio;
 }
 
 /*
@@ -1654,26 +1663,6 @@
 }
 
 /*
- * Add the thread passed as 'newtd' to the run queue before selecting
- * the next thread to run.  This is only used for preemption.
- */
-static void
-sched_switchin(struct tdq *tdq, struct thread *td)
-{
-#ifdef SMP
-	spinlock_enter();
-	TDQ_UNLOCK(tdq);
-	thread_lock(td);
-	spinlock_exit();
-	sched_setcpu(td, TDQ_ID(tdq), SRQ_YIELDING);
-#else
-	td->td_lock = TDQ_LOCKPTR(tdq);
-#endif
-	tdq_add(tdq, td, SRQ_YIELDING);
-	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-}
-
-/*
  * Block a thread for switching.  Similar to thread_block() but does not
  * bump the spin count.
 */
@@ -1701,7 +1690,7 @@
 	tdn = TDQ_CPU(TD_TO_TS(td)->ts_cpu);
 
 #ifdef SMP
-	tdq_load_rem(tdq, td->td_sched);
+	tdq_load_rem(tdq, td);
 	/*
 	 * Do the lock dance required to avoid LOR.  We grab an extra
 	 * spinlock nesting to prevent preemption while we're
@@ -1753,6 +1742,7 @@
 	int cpuid;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
 
 	cpuid = PCPU_GET(cpuid);
 	tdq = TDQ_CPU(cpuid);
@@ -1776,14 +1766,14 @@
 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 		    SRQ_OURSELF|SRQ_YIELDING;
 		if (ts->ts_cpu == cpuid)
-			tdq_runq_add(tdq, ts, srqflag);
+			tdq_runq_add(tdq, td, srqflag);
 		else
 			mtx = sched_switch_migrate(tdq, td, srqflag);
 	} else {
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
 		mtx = thread_block_switch(td);
-		tdq_load_rem(tdq, ts);
+		tdq_load_rem(tdq, td);
 	}
 	/*
 	 * We enter here with the thread blocked and assigned to the
@@ -1791,12 +1781,6 @@
 	 * thread-queue locked.
 	 */
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
-	/*
-	 * If preemption assigned a new thread just add it here and let choosethread
-	 * select the best one.
-	 */
-	if (newtd != NULL)
-		sched_switchin(tdq, newtd);
 	newtd = choosethread();
 	/*
 	 * Call the MD code to switch contexts if necessary.
@@ -1825,10 +1809,6 @@
 	} else
 		thread_unblock_switch(td, mtx);
 	/*
-	 * We should always get here with the lowest priority td possible.
-	 */
-	tdq->tdq_lowpri = td->td_priority;
-	/*
 	 * Assert that all went well and return.
 	 */
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
@@ -1845,7 +1825,6 @@
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 
 	p->p_nice = nice;
 	FOREACH_THREAD_IN_PROC(p, td) {
@@ -1914,17 +1893,16 @@
 	struct td_sched *ts;
 	struct td_sched *ts2;
 
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	/*
 	 * Initialize child.
 	 */
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	sched_newthread(child);
+	ts = TD_TO_TS(td);
+	ts2 = TD_TO_TS(child);
 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
 	child->td_cpuset = cpuset_ref(td->td_cpuset);
-	ts = TD_TO_TS(td);
-	ts2 = TD_TO_TS(child);
 	ts2->ts_cpu = ts->ts_cpu;
-	ts2->ts_runq = NULL;
+	ts2->ts_flags = 0;
 	/*
 	 * Grab our parents cpu estimation information and priority.
 	 */
@@ -1959,21 +1937,6 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_pri_class == class)
 		return;
-	/*
-	 * On SMP if we're on the RUNQ we must adjust the transferable
-	 * count because could be changing to or from an interrupt
-	 * class.
-	 */
-	if (TD_ON_RUNQ(td)) {
-		struct tdq *tdq;
-
-		tdq = TDQ_CPU(TD_TO_TS(td)->ts_cpu);
-		if (THREAD_CAN_MIGRATE(td))
-			tdq->tdq_transferable--;
-		td->td_pri_class = class;
-		if (THREAD_CAN_MIGRATE(td))
-			tdq->tdq_transferable++;
-	}
 	td->td_pri_class = class;
 }
 
@@ -2158,15 +2121,15 @@
 struct thread *
 sched_choose(void)
 {
-	struct td_sched *ts;
+	struct thread *td;
 	struct tdq *tdq;
 
 	tdq = TDQ_SELF();
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	ts = tdq_choose(tdq);
-	if (ts) {
-		ts->ts_ltick = ticks;
-		tdq_runq_rem(tdq, ts);
+	td = tdq_choose(tdq);
+	if (td) {
+		TD_TO_TS(td)->ts_ltick = ticks;
+		tdq_runq_rem(tdq, td);
 		return (td);
 	}
 	return (PCPU_GET(idlethread));
@@ -2205,7 +2168,6 @@
 void
 tdq_add(struct tdq *tdq, struct thread *td, int flags)
 {
-	struct td_sched *ts;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
@@ -2215,11 +2177,10 @@
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_add: thread swapped out"));
 
-	ts = TD_TO_TS(td);
 	if (td->td_priority < tdq->tdq_lowpri)
 		tdq->tdq_lowpri = td->td_priority;
-	tdq_runq_add(tdq, ts, flags);
-	tdq_load_add(tdq, ts);
+	tdq_runq_add(tdq, td, flags);
+	tdq_load_add(tdq, td);
 }
 
 /*
@@ -2231,7 +2192,6 @@
 {
 	struct tdq *tdq;
 #ifdef SMP
-	struct td_sched *ts;
 	int cpu;
 #endif
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
@@ -2249,12 +2209,11 @@
 	 * Pick the destination cpu and if it isn't ours transfer to the
 	 * target cpu.
 	 */
-	ts = TD_TO_TS(td);
-	cpu = sched_pickcpu(ts, flags);
+	cpu = sched_pickcpu(td, flags);
 	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
 	if (cpu != PCPU_GET(cpuid)) {
-		tdq_notify(tdq, ts);
+		tdq_notify(tdq, td);
 		return;
 	}
 #else
@@ -2286,19 +2245,17 @@
 sched_rem(struct thread *td)
 {
 	struct tdq *tdq;
-	struct td_sched *ts;
 
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
 	    td, td->td_name, td->td_priority, curthread, curthread->td_name);
 
-	ts = TD_TO_TS(td);
-	tdq = TDQ_CPU(ts->ts_cpu);
+	tdq = TDQ_CPU(TD_TO_TS(td)->ts_cpu);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
 	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
-	tdq_runq_rem(tdq, ts);
-	tdq_load_rem(tdq, ts);
+	tdq_runq_rem(tdq, td);
+	tdq_load_rem(tdq, td);
 	TD_SET_CAN_RUN(td);
 	if (td->td_priority == tdq->tdq_lowpri)
 		tdq_setlowpri(tdq, NULL);
@@ -2358,7 +2315,7 @@
 		 * an ipi to force the issue.
 		 */
 		cpu = ts->ts_cpu;
-		ts->ts_cpu = sched_pickcpu(ts, 0);
+		ts->ts_cpu = sched_pickcpu(td, 0);
 		if (cpu != PCPU_GET(cpuid))
 			ipi_selected(1 << cpu, IPI_PREEMPT);
 #endif
@@ -2500,7 +2457,7 @@
 		spinlock_exit();
 	} else {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, TD_TO_TS(td));
+		tdq_load_rem(tdq, td);
 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
 	}
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
@@ -2536,12 +2493,9 @@
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
 	lock_profile_obtain_lock_success(
 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
-	tdq->tdq_lowpri = td->td_priority;
 }
-
-static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
-    "Scheduler");
+SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
     "Scheduler name");
 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
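
[Editorial sketch appended to this archive copy; it is not part of change 138638.] Most of this IFC is mechanical: run-queue and load-accounting entry points such as tdq_runq_add(), tdq_load_add(), tdq_steal() and sched_choose() now take a struct thread * directly and recover the scheduler-private state internally through TD_TO_TS(), rather than being handed a struct td_sched *. The following minimal, self-contained C sketch illustrates that calling convention only; the struct layouts and the TD_TO_TS() macro here are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel types; not the real layouts. */
struct td_sched {
	int	ts_flags;
	int	ts_cpu;
};

struct thread {
	int		td_priority;
	struct td_sched	td_sched;	/* per-thread scheduler state */
};

#define	TD_TO_TS(td)	(&(td)->td_sched)

/*
 * New-style queue operation: the caller passes the thread itself and
 * the per-thread scheduler state is looked up via TD_TO_TS(), instead
 * of the old convention of passing a struct td_sched * around.
 */
static void
tdq_runq_add(struct thread *td, int flags)
{
	struct td_sched *ts;

	ts = TD_TO_TS(td);
	ts->ts_flags |= flags;
	printf("queued thread at priority %d on cpu %d\n",
	    td->td_priority, ts->ts_cpu);
}

int
main(void)
{
	struct thread td = { .td_priority = 100 };

	tdq_runq_add(&td, 0x1);
	return (0);
}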