From owner-p4-projects@FreeBSD.ORG Sun Jul 27 13:13:22 2008
Delivered-To: p4-projects@freebsd.org
Date: Sun, 27 Jul 2008 13:13:22 GMT
Message-Id: <200807271313.m6RDDMwY003107@repoman.freebsd.org>
X-Authentication-Warning: repoman.freebsd.org: perforce set sender to jhb@freebsd.org using -f
From: John Baldwin
To: Perforce Change Reviews
Subject: PERFORCE change 146044 for review
Precedence: list
List-Id: p4 projects tree changes

http://perforce.freebsd.org/chv.cgi?CH=146044

Change 146044 by jhb@jhb_zion on 2008/07/27 13:12:26

	Better affinity handling:
	- Set a flag on the thread when a cpuset is applied if the set
	  doesn't include all CPUs.  This lets us avoid invoking
	  sched_pickcpu() in the common case in sched_add().
	- Handle threads executing on other CPUs and threads that are
	  on a runqueue in sched_affinity().

Affected files ...

.. //depot/projects/smpng/sys/kern/sched_4bsd.c#76 edit

Differences ...

==== //depot/projects/smpng/sys/kern/sched_4bsd.c#76 (text+ko) ====

@@ -97,6 +97,7 @@
 /* flags kept in td_flags */
 #define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
 #define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
+#define	TDF_AFFINITY	TDF_SCHED2	/* Has a non-"full" CPU set. */
 
 #define SKE_RUNQ_PCPU(ts)						\
     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
@@ -1165,34 +1166,25 @@
 static int
 sched_pickcpu(struct thread *td)
 {
-	int best, cpu, fullset;
+	int best, cpu;
 
 	mtx_assert(&sched_lock, MA_OWNED);
-	fullset = 1;
 
 	best = NOCPU;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (CPU_ABSENT(cpu))
 			continue;
-		if (!THREAD_CAN_SCHED(td, cpu)) {
-			/*
-			 * At least one available CPU isn't in our
-			 * set, so it isn't a "full" set.
-			 */
-			fullset = 0;
+		if (!THREAD_CAN_SCHED(td, cpu))
 			continue;
-		}
 
 		if (best == NOCPU)
 			best = cpu;
 		else if (runq_length[cpu] < runq_length[best])
 			best = cpu;
 	}
+	KASSERT(best != NOCPU, ("no valid CPUs"));
 
-	if (fullset)
-		return (NOCPU);
-	else
-		return (best);
+	return (best);
 }
 #endif
@@ -1232,25 +1224,25 @@
 		single_cpu = 1;
 		CTR3(KTR_RUNQ,
 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
-	} else if ((td)->td_flags & TDF_BOUND) {
+	} else if (td->td_flags & TDF_BOUND) {
 		/* Find CPU from bound runq */
 		KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
 		cpu = ts->ts_runq - &runq_pcpu[0];
 		single_cpu = 1;
 		CTR3(KTR_RUNQ,
 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
-	} else {
+	} else if (td->td_flags & TDF_AFFINITY) {
 		/* Find a valid CPU for our cpuset */
 		cpu = sched_pickcpu(td);
-		if (cpu == NOCPU) {
-			CTR2(KTR_RUNQ,
+		ts->ts_runq = &runq_pcpu[cpu];
+		single_cpu = 1;
+		CTR3(KTR_RUNQ,
+		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+	} else {
+		CTR2(KTR_RUNQ,
 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
-			ts->ts_runq = &runq;
-		} else {
-			single_cpu = 1;
-			CTR3(KTR_RUNQ,
-			    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
-		}
+		cpu = NOCPU;
+		ts->ts_runq = &runq;
 	}
 
 	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
@@ -1577,16 +1569,56 @@
 sched_affinity(struct thread *td)
 {
 #ifdef SMP
+	struct td_sched *ts;
+	int cpu;
+
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	/*
+	 * Set the TDF_AFFINITY flag if there is at least one CPU this
+	 * thread can't run on.
+	 */
+	td->td_flags &= ~TDF_AFFINITY;
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		if (CPU_ABSENT(cpu))
+			continue;
+		if (!THREAD_CAN_SCHED(td, cpu)) {
+			td->td_flags |= TDF_AFFINITY;
+			break;
+		}
+	}
 
 	/*
-	 * See if our current CPU is in the set.  If not, force a
-	 * context switch.
+	 * If this thread can run on all CPUs, nothing else to do.
 	 */
-	if (THREAD_CAN_SCHED(td, PCPU_GET(cpuid)))
+	if (!(td->td_flags & TDF_AFFINITY))
 		return;
 
-	mi_switch(SW_VOL, NULL);
+	switch (td->td_state) {
+	case TDS_RUNQ:
+		/*
+		 * If we are on a per-CPU runqueue that is in the set,
+		 * then nothing needs to be done.
+		 */
+		ts = td->td_sched;
+		if (ts->ts_runq != &runq &&
+		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
+			return;
+
+		/* Put this thread on a valid per-CPU runqueue. */
+		sched_rem(td);
+		sched_add(td, SRQ_BORING);
+		break;
+	case TDS_RUNNING:
+		/*
+		 * See if our current CPU is in the set.  If not, force a
+		 * context switch.
+		 */
+		if (THREAD_CAN_SCHED(td, td->td_oncpu))
+			return;
+
+		mi_switch(SW_VOL, NULL);
+		break;
+	}
 #endif
 }
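
For reference, a minimal userland sketch (not part of the change itself) of how
a thread ends up with a non-"full" set: applying a restricted mask with
cpuset_setaffinity(2) is what drives sched_affinity(), which with this patch
marks the thread TDF_AFFINITY so that sched_add() only calls sched_pickcpu()
for threads whose set really excludes some CPU.  The program below is
illustrative only, assumes the cpuset(2) API this branch already builds on,
and keeps error handling minimal.

	#include <sys/param.h>
	#include <sys/cpuset.h>
	#include <err.h>

	int
	main(void)
	{
		cpuset_t mask;

		/* Allow the current thread to run on CPU 0 only. */
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
		    sizeof(mask), &mask) != 0)
			err(1, "cpuset_setaffinity");

		/* From here on the scheduler keeps this thread on CPU 0. */
		return (0);
	}

With the patch above, a thread whose set still covers every CPU never takes
the sched_pickcpu() path in sched_add(); only threads flagged TDF_AFFINITY pay
for the per-CPU runqueue search.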