From owner-p4-projects@FreeBSD.ORG Tue Jul 6 07:26:31 2004
Date: Tue, 6 Jul 2004 07:26:30 GMT
Message-Id: <200407060726.i667QUct010701@repoman.freebsd.org>
From: Julian Elischer
To: Perforce Change Reviews
Subject: PERFORCE change 56596 for review

http://perforce.freebsd.org/chv.cgi?CH=56596

Change 56596 by julian@julian_jules1 on 2004/07/06 07:26:07

	Catch up with kern_switch.c, which has been moved elsewhere
	in my code.

Affected files ...

.. //depot/projects/nsched/sys/kern/sched_4bsd.c#24 edit
.. //depot/projects/nsched/sys/kern/sched_ule.c#12 edit

Differences ...

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#24 (text+ko) ====

@@ -36,6 +36,8 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.43 2004/07/02 20:21:43 jhb Exp $");
 
+#include "opt_full_preemption.h"
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -1832,6 +1834,13 @@
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
 	if (td->td_critnest == 1) {
+#ifdef PREEMPTION
+		if (td->td_flags & TDF_OWEPREEMPT) {
+			mtx_lock_spin(&sched_lock);
+			mi_switch(SW_INVOL, NULL);
+			mtx_unlock_spin(&sched_lock);
+		}
+#endif
 		td->td_critnest = 0;
 		cpu_critical_exit();
 	} else {
@@ -1839,6 +1848,88 @@
 	}
 }
 
+/*
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted.  It
+ * determines if the new thread should be immediately preempted to.  If so,
+ * it switches to it and eventually returns true.  If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+	struct thread *ctd;
+	int cpri, pri;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+#ifdef PREEMPTION
+	/*
+	 * The new thread should not preempt the current thread if any of the
+	 * following conditions are true:
+	 *
+	 *  - The current thread has a higher (numerically lower) priority.
+	 *  - It is too early in the boot for context switches (cold is set).
+	 *  - The current thread has an inhibitor set or is in the process of
+	 *    exiting.  In this case, the current thread is about to switch
+	 *    out anyways, so there's no point in preempting.  If we did,
+	 *    the current thread would not be properly resumed as well, so
+	 *    just avoid that whole landmine.
+	 *  - If the new thread's priority is not a realtime priority and
+	 *    the current thread's priority is not an idle priority and
+	 *    FULL_PREEMPTION is disabled.
+	 *
+	 * If all of these conditions are false, but the current thread is in
+	 * a nested critical section, then we have to defer the preemption
+	 * until we exit the critical section.  Otherwise, switch immediately
+	 * to the new thread.
+	 */
+	ctd = curthread;
+	pri = td->td_priority;
+	cpri = ctd->td_priority;
+	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
+	    td->td_td_sched->std_state != STDS_THREAD)
+		return (0);
+#ifndef FULL_PREEMPTION
+	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
+	    !(cpri >= PRI_MIN_IDLE))
+		return (0);
+#endif
+	if (ctd->td_critnest > 1) {
+		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+		    ctd->td_critnest);
+		ctd->td_flags |= TDF_OWEPREEMPT;
+		return (0);
+	}
+
+	/*
+	 * Our thread state says that we are already on a run queue, so
+	 * update our state as if we had been dequeued by choosethread().
+	 */
+	MPASS(TD_ON_RUNQ(td));
+	TD_SET_RUNNING(td);
+	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+	    td->td_proc->p_pid, td->td_proc->p_comm);
+	mi_switch(SW_INVOL, td);
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+#ifndef PREEMPTION
+/* XXX: There should be a non-static version of this. */
+static void
+printf_caddr_t(void *data)
+{
+	printf("%s", (char *)data);
+}
+static char preempt_warning[] =
+    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
+SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
+    preempt_warning)
+#endif
 
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests *

==== //depot/projects/nsched/sys/kern/sched_ule.c#12 (text+ko) ====

@@ -2772,6 +2772,13 @@
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
 	if (td->td_critnest == 1) {
+#ifdef PREEMPTION
+		if (td->td_flags & TDF_OWEPREEMPT) {
+			mtx_lock_spin(&sched_lock);
+			mi_switch(SW_INVOL, NULL);
+			mtx_unlock_spin(&sched_lock);
+		}
+#endif
 		td->td_critnest = 0;
 		cpu_critical_exit();
 	} else {
@@ -2780,6 +2787,88 @@
 }
 
+/*
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted.  It
+ * determines if the new thread should be immediately preempted to.  If so,
+ * it switches to it and eventually returns true.  If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+	struct thread *ctd;
+	int cpri, pri;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+#ifdef PREEMPTION
+	/*
+	 * The new thread should not preempt the current thread if any of the
+	 * following conditions are true:
+	 *
+	 *  - The current thread has a higher (numerically lower) priority.
+	 *  - It is too early in the boot for context switches (cold is set).
+	 *  - The current thread has an inhibitor set or is in the process of
+	 *    exiting.  In this case, the current thread is about to switch
+	 *    out anyways, so there's no point in preempting.  If we did,
+	 *    the current thread would not be properly resumed as well, so
+	 *    just avoid that whole landmine.
+	 *  - If the new thread's priority is not a realtime priority and
+	 *    the current thread's priority is not an idle priority and
+	 *    FULL_PREEMPTION is disabled.
+	 *
+	 * If all of these conditions are false, but the current thread is in
+	 * a nested critical section, then we have to defer the preemption
+	 * until we exit the critical section.  Otherwise, switch immediately
+	 * to the new thread.
+	 */
+	ctd = curthread;
+	pri = td->td_priority;
+	cpri = ctd->td_priority;
+	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
+	    td->td_kse->ke_state != KES_THREAD)
+		return (0);
+#ifndef FULL_PREEMPTION
+	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
+	    !(cpri >= PRI_MIN_IDLE))
+		return (0);
+#endif
+	if (ctd->td_critnest > 1) {
+		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+		    ctd->td_critnest);
+		ctd->td_flags |= TDF_OWEPREEMPT;
+		return (0);
+	}
+
+	/*
+	 * Our thread state says that we are already on a run queue, so
+	 * update our state as if we had been dequeued by choosethread().
+	 */
+	MPASS(TD_ON_RUNQ(td));
+	TD_SET_RUNNING(td);
+	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+	    td->td_proc->p_pid, td->td_proc->p_comm);
+	mi_switch(SW_INVOL, td);
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+#ifndef PREEMPTION
+/* XXX: There should be a non-static version of this. */
+static void
+printf_caddr_t(void *data)
+{
+	printf("%s", (char *)data);
+}
+static char preempt_warning[] =
+    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
+SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
+    preempt_warning)
+#endif
 
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests *
 * basically from the standard BSD scheduler *
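
For reviewers who would rather read the intent than diff both copies: the
stand-alone sketch below models what the new maybe_preempt() and
critical_exit() hunks do together.  It is not kernel code; mock_thread,
should_preempt(), mock_critical_exit(), and the priority values are
stand-ins invented for this example (the real ranges live in
sys/priority.h), and the sched_lock handling, KTR tracing, and the actual
mi_switch() call are left out.

/*
 * Illustrative userspace model only: every type, constant, and function
 * below is a mock stand-in for the kernel objects used in the diff.
 */
#include <stdbool.h>
#include <stdio.h>

/* Placeholder priority ranges; numerically lower means more important. */
#define PRI_MIN_ITHD	0	/* interrupt threads (illustrative values) */
#define PRI_MAX_ITHD	63
#define PRI_MIN_IDLE	224	/* idle threads */

#define TDF_OWEPREEMPT	0x0001	/* a preemption is owed at critical_exit() */

struct mock_thread {
	int	td_priority;
	int	td_critnest;	/* critical section nesting level */
	int	td_flags;
	bool	td_inhibited;	/* stands in for TD_IS_INHIBITED() */
};

/*
 * Mirrors the checks in maybe_preempt(): do not preempt when the new
 * thread is not more important or the current thread is on its way out;
 * without FULL_PREEMPTION, only interrupt threads preempt unless the
 * current thread is idle.  If the only obstacle is a nested critical
 * section (level 1 stands for the sched_lock the real caller holds),
 * record the owed preemption instead of switching.
 */
static bool
should_preempt(struct mock_thread *td, struct mock_thread *ctd,
    bool full_preemption)
{
	int pri = td->td_priority;
	int cpri = ctd->td_priority;

	if (pri >= cpri || ctd->td_inhibited)
		return (false);
	if (!full_preemption &&
	    !(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (false);
	if (ctd->td_critnest > 1) {
		ctd->td_flags |= TDF_OWEPREEMPT;	/* defer the switch */
		return (false);
	}
	return (true);	/* the real code calls mi_switch(SW_INVOL, td) here */
}

/* Mirrors the new critical_exit() hunk: honour a deferred preemption. */
static void
mock_critical_exit(struct mock_thread *ctd)
{
	if (ctd->td_critnest == 1) {
		if (ctd->td_flags & TDF_OWEPREEMPT) {
			ctd->td_flags &= ~TDF_OWEPREEMPT;
			printf("critical section left: switching now\n");
		}
		ctd->td_critnest = 0;
	} else
		ctd->td_critnest--;
}

int
main(void)
{
	struct mock_thread ithd = { .td_priority = 16 };
	struct mock_thread cur = { .td_priority = 160, .td_critnest = 2 };

	/* Nested critical section: no switch yet, only a note is left. */
	printf("preempt now? %d\n", should_preempt(&ithd, &cur, false));
	mock_critical_exit(&cur);	/* inner exit: just count down */
	mock_critical_exit(&cur);	/* outer exit: owed switch happens */
	return (0);
}

The detail the sketch is meant to surface is the hand-off: maybe_preempt()
never switches while the current thread is nested inside a critical
section; it only sets TDF_OWEPREEMPT, and the matching critical_exit()
change in each scheduler performs the owed involuntary switch once the
nesting count drops back to one.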