Date: Wed, 16 Apr 2003 23:55:50 -0700 (PDT) From: Julian Elischer <julian@elischer.org> To: Jeff Roberson <jroberson@chesapeake.net> Cc: freebsd-threads@freebsd.org Subject: Re: Patches for threads/scheduler abstraction. Message-ID: <Pine.BSF.4.21.0304162233240.94222-101000@InterJet.elischer.org> In-Reply-To: <20030416221535.B76635-100000@mail.chesapeake.net>
index | next in thread | previous in thread | raw e-mail
[-- Attachment #1 --]
On Wed, 16 Apr 2003, Jeff Roberson wrote:
>
> On Wed, 16 Apr 2003, Julian Elischer wrote:
>
> >
> > OK I will send them shortly.
> >
> > These patches are NOT CLEAN
> > by which I mean I have not done any tidying up yet.
> >
> > It is still in the "cat dragged this over the fence" state.
> > However it does compile cleanly with only four files knowing what a kse
> > is (Not counting ULE, I have not touched ULE yet) (and I would actually
> > rather that you did.. you understand it better).
> >
> > !!!!!!I have not tried to RUN this yet!!!!!!
> > The files that know what a kse is are:
> > ref3# grep ksevar.h kern/*
> > kern/kern_switch.c:#include <sys/ksevar.h>
> > kern/kern_thread.c:#include <sys/ksevar.h>
> > kern/sched_4bsd.c:#include <sys/ksevar.h>
> > kern/sched_4bsd_kse.c:#include <sys/ksevar.h>
> > (note that kern_thr.c is not one of them)
> >
> > My plan is:
> > eventually, sched_4bsd.c will only do the original BSD
> > scheduling (except with threads instead of processes)
> > i.e. unfair, but simple and easy to benchmark on non-threaded apps.
> > (or on threaded apps with no competing processes)
> >
> > sched_4bsd_kse.c will suck in the current contents of kern_switch.c
> > and the KSE'd version of sched_4bsd.c.
> >
> > this will act like the current 4bsd scheduler WITH KSEs
> >
>
> There is a better way to do this. If you would listen to my tiered
> approach you could end up with one copy of sched_4bsd and not two.
> The kse specific logic could just sit in kern_kse which would not
> tell sched_4bsd about threads until they owned the KSE. We could
> even do this without the kse structure as a go between.
I think that is an overcomplication.
the two BSD based files would be very different.
For a start the simple one would be queueing threads on the run queues.
A system compiled with that scheduler would have no KSEs anywhere
in the entire kernel.
The kse one would be queueing KSEs. I don't see how you can do this
with a shared file.
Anyhow, the following hack (totally unoptimised.... notice the
existence of sched_td_exit, sched_exit_thread, and sched_thr_exit..
I just haven't got to cleaning it) is not so advanced that
the question of tiered schedulers is relevant yet..
this patch just shows that it is possible to get the KSEs
out of the rest of the system.
>
> > switch.c will go away (unless you want it as part of ULE).
> >
> > kern_thread.c will forget all about KSEs
> > (but its not there yet).
> >
> > I'll send the patches in an hour or so..
> > My wife's calling :-/
> >
> >
> >
> >
> >
> >
> > >
> > > >
> > > > Julian
> > > >
> > > >
> > >
> > >
> >
>
>
[-- Attachment #2 --]
un-kse.diff 0100644 0000000 0000000 00000075153 07647446000 011754 0 ustar root wheel ? sys/ls.gmon
? sys/conf/ls.gmon
? sys/i386/conf/REF3
? sys/i386/conf/ls.gmon
? sys/kern/sched_4bsd_kse.c
? sys/sys/ksevar.h
? sys/sys/ls.gmon
Index: sys/conf/files
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/conf/files,v
retrieving revision 1.782
diff -u -r1.782 files
--- sys/conf/files 2003/04/15 04:08:01 1.782
+++ sys/conf/files 2003/04/17 06:34:50
@@ -1053,6 +1053,7 @@
kern/md4c.c optional netsmb
kern/md5c.c standard
kern/sched_4bsd.c optional sched_4bsd
+kern/sched_4bsd_kse.c optional sched_4bsd
kern/sched_ule.c optional sched_ule
kern/subr_autoconf.c standard
kern/subr_blist.c standard
Index: sys/i386/i386/machdep.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/i386/i386/machdep.c,v
retrieving revision 1.561
diff -u -r1.561 machdep.c
--- sys/i386/i386/machdep.c 2003/04/02 23:53:28 1.561
+++ sys/i386/i386/machdep.c 2003/04/17 06:34:54
@@ -1931,11 +1931,7 @@
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
atdevbase = ISA_HOLE_START + KERNBASE;
- /*
- * This may be done better later if it gets more high level
- * components in it. If so just link td->td_proc here.
- */
- proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
+ thread0.td_proc = &proc0; /* some stuff assumes this is already done */
metadata_missing = 0;
if (bootinfo.bi_modulep) {
Index: sys/kern/init_main.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/init_main.c,v
retrieving revision 1.229
diff -u -r1.229 init_main.c
--- sys/kern/init_main.c 2003/04/13 21:29:10 1.229
+++ sys/kern/init_main.c 2003/04/17 06:34:55
@@ -88,7 +88,6 @@
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
-struct kse kse0;
struct ksegrp ksegrp0;
static struct procsig procsig0;
static struct filedesc0 filedesc0;
@@ -308,18 +307,12 @@
register unsigned i;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
- ke = &kse0;
kg = &ksegrp0;
- ke->ke_sched = kse0_sched;
- kg->kg_sched = ksegrp0_sched;
- p->p_sched = proc0_sched;
- td->td_sched = thread0_sched;
/*
* Initialize magic number.
@@ -329,9 +322,11 @@
/*
* Initialize thread, process and pgrp structures.
*/
- procinit();
- threadinit();
+ procinit(); /* set up proc zone */
+ threadinit(); /* set up thread, upcall and KSEGRP zones */
+ schedinit(); /* set up zones etc. for the scheduler */
+
/*
* Initialize sleep queue hash table
*/
@@ -360,12 +355,7 @@
p->p_sysent = &null_sysvec;
- /*
- * proc_linkup was already done in init_i386() or alphainit() etc.
- * because the earlier code needed to follow td->td_proc. Otherwise
- * I would have done it here.. maybe this means this should be
- * done earlier too.
- */
+ proc_linkup(&proc0, &ksegrp0, &thread0);
p->p_flag = P_SYSTEM;
p->p_sflag = PS_INMEM;
p->p_state = PRS_NORMAL;
@@ -375,10 +365,7 @@
kg->kg_user_pri = PUSER;
td->td_priority = PVM;
td->td_base_pri = PUSER;
- td->td_kse = ke; /* XXXKSE */
td->td_oncpu = 0;
- ke->ke_state = KES_THREAD;
- ke->ke_thread = td;
p->p_peers = 0;
p->p_leader = p;
Index: sys/kern/kern_clock.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_clock.c,v
retrieving revision 1.156
diff -u -r1.156 kern_clock.c
--- sys/kern/kern_clock.c 2003/04/11 03:39:07 1.156
+++ sys/kern/kern_clock.c 2003/04/17 06:34:55
@@ -357,7 +357,6 @@
struct rusage *ru;
struct vmspace *vm;
struct thread *td;
- struct kse *ke;
struct proc *p;
long rss;
@@ -365,7 +364,6 @@
p = td->td_proc;
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- ke = td->td_kse;
if (CLKF_USERMODE(frame)) {
/*
* Charge the time as appropriate.
@@ -373,7 +371,7 @@
if (p->p_flag & P_THREADED)
thread_statclock(1);
p->p_uticks++;
- if (ke->ke_ksegrp->kg_nice > NZERO)
+ if (td->td_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;
@@ -405,7 +403,7 @@
}
}
- sched_clock(ke);
+ sched_clock(td);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
Index: sys/kern/kern_fork.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_fork.c,v
retrieving revision 1.191
diff -u -r1.191 kern_fork.c
--- sys/kern/kern_fork.c 2003/04/11 03:39:07 1.191
+++ sys/kern/kern_fork.c 2003/04/17 06:34:55
@@ -217,7 +217,6 @@
struct filedesc *fd;
struct proc *p1 = td->td_proc;
struct thread *td2;
- struct kse *ke2;
struct ksegrp *kg2;
struct sigacts *newsigacts;
struct procsig *newprocsig;
@@ -448,7 +447,6 @@
*/
td2 = FIRST_THREAD_IN_PROC(p2);
kg2 = FIRST_KSEGRP_IN_PROC(p2);
- ke2 = FIRST_KSE_IN_KSEGRP(kg2);
/* Allocate and switch to an alternate kstack if specified */
if (pages != 0)
@@ -458,8 +456,6 @@
bzero(&p2->p_startzero,
(unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
- bzero(&ke2->ke_startzero,
- (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
bzero(&td2->td_startzero,
(unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
bzero(&kg2->kg_startzero,
@@ -477,11 +473,6 @@
(unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
#undef RANGEOF
- /* Set up the thread as an active thread (as if runnable). */
- ke2->ke_state = KES_THREAD;
- ke2->ke_thread = td2;
- td2->td_kse = ke2;
-
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
@@ -496,7 +487,7 @@
* Allow the scheduler to adjust the priority of the child and
* parent while we hold the sched_lock.
*/
- sched_fork(p1, p2);
+ sched_fork(td, p2);
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);
Index: sys/kern/kern_idle.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_idle.c,v
retrieving revision 1.29
diff -u -r1.29 kern_idle.c
--- sys/kern/kern_idle.c 2002/10/12 05:32:23 1.29
+++ sys/kern/kern_idle.c 2003/04/17 06:34:55
@@ -65,7 +65,7 @@
p->p_state = PRS_NORMAL;
td = FIRST_THREAD_IN_PROC(p);
td->td_state = TDS_CAN_RUN;
- td->td_kse->ke_flags |= KEF_IDLEKSE;
+ td->td_flags |= TDF_IDLETD;
#ifdef SMP
}
#endif
Index: sys/kern/kern_intr.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_intr.c,v
retrieving revision 1.91
diff -u -r1.91 kern_intr.c
--- sys/kern/kern_intr.c 2003/03/04 21:01:42 1.91
+++ sys/kern/kern_intr.c 2003/04/17 06:34:55
@@ -396,7 +396,7 @@
KASSERT((TD_IS_RUNNING(ctd)),
("ithread_schedule: Bad state for curthread."));
ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
- if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
+ if (ctd->td_flags & TDF_IDLETD)
ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
mi_switch();
} else {
Index: sys/kern/kern_ktr.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_ktr.c,v
retrieving revision 1.35
diff -u -r1.35 kern_ktr.c
--- sys/kern/kern_ktr.c 2003/03/11 19:56:16 1.35
+++ sys/kern/kern_ktr.c 2003/04/17 06:34:55
@@ -195,7 +195,7 @@
#ifdef KTR_ALQ
if (ktr_alq_enabled &&
td->td_critnest == 0 &&
- (td->td_kse->ke_flags & KEF_IDLEKSE) == 0 &&
+ (td->td_flags & TDF_IDLETD) == 0 &&
td != ald_thread) {
if (ktr_alq_max && ktr_alq_cnt > ktr_alq_max)
goto done;
Index: sys/kern/kern_proc.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_proc.c,v
retrieving revision 1.178
diff -u -r1.178 kern_proc.c
--- sys/kern/kern_proc.c 2003/04/10 17:35:44 1.178
+++ sys/kern/kern_proc.c 2003/04/17 06:34:55
@@ -148,18 +148,17 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
/* INVARIANTS checks go here */
p = (struct proc *)mem;
+ td = FIRST_THREAD_IN_PROC(p);
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
- td = FIRST_THREAD_IN_PROC(p);
+#ifdef INVARIANTS
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
+#endif
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
@@ -168,14 +167,6 @@
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
pmap_dispose_altkstack(td);
-
- /*
- * We want to make sure we know the initial linkages.
- * so for now tear them down and remake them.
- * This is probably un-needed as we can probably rely
- * on the state coming in here from wait4().
- */
- proc_linkup(p, kg, ke, td);
}
/*
@@ -187,15 +178,14 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
vm_proc_new(p);
td = thread_alloc();
- ke = kse_alloc();
kg = ksegrp_alloc();
- proc_linkup(p, kg, ke, td);
+ proc_linkup(p, kg, td);
+ sched_newproc(p, kg, td);
}
/*
@@ -207,7 +197,6 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
KASSERT((p->p_numthreads == 1),
@@ -216,12 +205,10 @@
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
vm_proc_dispose(p);
+ sched_destroyproc(p, kg, td);
thread_free(td);
ksegrp_free(kg);
- kse_free(ke);
}
/*
@@ -611,7 +598,6 @@
{
struct thread *td;
struct thread *td0;
- struct kse *ke;
struct ksegrp *kg;
struct tty *tp;
struct session *sp;
@@ -719,8 +705,6 @@
/* vvv XXXKSE */
if (!(p->p_flag & P_THREADED)) {
kg = td->td_ksegrp;
- ke = td->td_kse;
- KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
bintime2timeval(&p->p_runtime, &tv);
kp->ki_runtime =
tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
@@ -742,9 +726,8 @@
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
- /* Things in the kse */
- kp->ki_rqindex = ke->ke_rqindex;
- kp->ki_pctcpu = sched_pctcpu(ke);
+ kp->ki_rqindex = 0; /* can't get this now */
+ kp->ki_pctcpu = sched_pctcpu(td);
} else {
kp->ki_oncpu = -1;
kp->ki_lastcpu = -1;
Index: sys/kern/kern_switch.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_switch.c,v
retrieving revision 1.57
diff -u -r1.57 kern_switch.c
--- sys/kern/kern_switch.c 2003/04/02 23:53:29 1.57
+++ sys/kern/kern_switch.c 2003/04/17 06:34:55
@@ -95,6 +95,7 @@
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/ksevar.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
Index: sys/kern/kern_thr.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_thr.c,v
retrieving revision 1.5
diff -u -r1.5 kern_thr.c
--- sys/kern/kern_thr.c 2003/04/11 19:24:37 1.5
+++ sys/kern/kern_thr.c 2003/04/17 06:34:55
@@ -52,13 +52,11 @@
{
struct ksegrp *kg;
struct thread *td;
- struct kse *ke;
struct proc *p;
td = curthread;
p = td->td_proc;
kg = td->td_ksegrp;
- ke = td->td_kse;
mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -80,18 +78,9 @@
/* Clean up cpu resources. */
cpu_thread_exit(td);
+ thread_unlink(td);
+ sched_thr_exit(td);
- /* XXX make thread_unlink() */
- TAILQ_REMOVE(&p->p_threads, td, td_plist);
- p->p_numthreads--;
- TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
- kg->kg_numthreads--;
-
- ke->ke_state = KES_UNQUEUED;
- ke->ke_thread = NULL;
- kse_unlink(ke);
- sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke);
-
/*
* If we were stopped while waiting for all threads to exit and this
* is the last thread wakeup the exiting thread.
@@ -101,7 +90,6 @@
thread_unsuspend_one(p->p_singlethread);
PROC_UNLOCK(p);
- td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
#if 0
td->td_proc = NULL;
@@ -127,7 +115,6 @@
thr_create(struct thread *td, struct thr_create_args *uap)
/* ucontext_t *ctx, thr_id_t *id, int flags */
{
- struct kse *ke0;
struct thread *td0;
ucontext_t ctx;
int error;
@@ -157,32 +144,27 @@
bcopy(td->td_frame, td0->td_frame, sizeof(struct trapframe));
td0->td_ucred = crhold(td->td_ucred);
- /* Initialize our kse structure. */
- ke0 = kse_alloc();
- bzero(&ke0->ke_startzero,
- RANGEOF(struct kse, ke_startzero, ke_endzero));
/* Set up our machine context. */
cpu_set_upcall(td0, td->td_pcb);
error = set_mcontext(td0, &ctx.uc_mcontext);
if (error != 0) {
- kse_free(ke0);
thread_free(td0);
goto out;
}
- /* Link the thread and kse into the ksegrp and make it runnable. */
mtx_lock_spin(&sched_lock);
thread_link(td0, td->td_ksegrp);
- kse_link(ke0, td->td_ksegrp);
-
- /* Bind this thread and kse together. */
- td0->td_kse = ke0;
- ke0->ke_thread = td0;
- sched_fork_kse(td->td_kse, ke0);
- sched_fork_thread(td, td0);
+ /* Do whatever is needed for the scheduler */
+ error = sched_thread_fork(td, td0);
+ if (error != 0) {
+ thread_unlink(td);
+ mtx_unlock_spin(&sched_lock);
+ thread_free(td0);
+ goto out;
+ }
TD_SET_CAN_RUN(td0);
if ((uap->flags & THR_SUSPENDED) == 0)
Index: sys/kern/kern_thread.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/kern_thread.c,v
retrieving revision 1.114
diff -u -r1.114 kern_thread.c
--- sys/kern/kern_thread.c 2003/04/10 17:35:44 1.114
+++ sys/kern/kern_thread.c 2003/04/17 06:34:55
@@ -34,6 +34,7 @@
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
+#include <sys/ksevar.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -61,7 +62,6 @@
* KSEGRP related storage.
*/
static uma_zone_t ksegrp_zone;
-static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;
@@ -203,18 +203,6 @@
}
/*
- * Initialize type-stable parts of a kse (when newly created).
- */
-static void
-kse_init(void *mem, int size)
-{
- struct kse *ke;
-
- ke = (struct kse *)mem;
- ke->ke_sched = (struct ke_sched *)&ke[1];
-}
-
-/*
* Initialize type-stable parts of a ksegrp (when newly created).
*/
static void
@@ -226,44 +214,6 @@
kg->kg_sched = (struct kg_sched *)&kg[1];
}
-/*
- * KSE is linked into kse group.
- */
-void
-kse_link(struct kse *ke, struct ksegrp *kg)
-{
- struct proc *p = kg->kg_proc;
-
- TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses++;
- ke->ke_state = KES_UNQUEUED;
- ke->ke_proc = p;
- ke->ke_ksegrp = kg;
- ke->ke_thread = NULL;
- ke->ke_oncpu = NOCPU;
- ke->ke_flags = 0;
-}
-
-void
-kse_unlink(struct kse *ke)
-{
- struct ksegrp *kg;
-
- mtx_assert(&sched_lock, MA_OWNED);
- kg = ke->ke_ksegrp;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- if (ke->ke_state == KES_IDLE) {
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- }
- if (--kg->kg_kses == 0)
- ksegrp_unlink(kg);
- /*
- * Aggregate stats from the KSE
- */
- kse_stash(ke);
-}
-
void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{
@@ -364,8 +314,7 @@
* link up all the structures and its initial threads etc.
*/
void
-proc_linkup(struct proc *p, struct ksegrp *kg,
- struct kse *ke, struct thread *td)
+proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
@@ -375,7 +324,6 @@
p->p_numthreads = 0;
ksegrp_link(kg, p);
- kse_link(ke, kg);
thread_link(td, kg);
}
@@ -805,9 +753,6 @@
ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
NULL, NULL, ksegrp_init, NULL,
UMA_ALIGN_CACHE, 0);
- kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
- NULL, NULL, kse_init, NULL,
- UMA_ALIGN_CACHE, 0);
upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
@@ -925,15 +870,6 @@
}
/*
- * Allocate a kse.
- */
-struct kse *
-kse_alloc(void)
-{
- return (uma_zalloc(kse_zone, M_WAITOK));
-}
-
-/*
* Allocate a thread.
*/
struct thread *
@@ -953,15 +889,6 @@
}
/*
- * Deallocate a kse.
- */
-void
-kse_free(struct kse *td)
-{
- uma_zfree(kse_zone, td);
-}
-
-/*
* Deallocate a thread.
*/
void
@@ -1203,10 +1130,7 @@
* would only be called from here (I think) so it would
* be a waste. (might be useful for proc_fini() as well.)
*/
- TAILQ_REMOVE(&p->p_threads, td, td_plist);
- p->p_numthreads--;
- TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
- kg->kg_numthreads--;
+ thread_unlink(td);
if (p->p_maxthrwaits)
wakeup(&p->p_numthreads);
/*
@@ -1231,15 +1155,11 @@
if (td->td_upcall)
upcall_remove(td);
- ke->ke_state = KES_UNQUEUED;
- ke->ke_thread = NULL;
/*
- * Decide what to do with the KSE attached to this thread.
+ * Let the scheduler do anything it needs to clean up.
*/
- if (ke->ke_flags & KEF_EXIT)
- kse_unlink(ke);
- else
- kse_reassign(ke);
+ sched_tdexit(td);
+
PROC_UNLOCK(p);
td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
@@ -1247,7 +1167,6 @@
td->td_proc = NULL;
#endif
td->td_ksegrp = NULL;
- td->td_last_kse = NULL;
PCPU_SET(deadthread, td);
} else {
PROC_UNLOCK(p);
@@ -1311,6 +1230,18 @@
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
kg->kg_numthreads++;
+}
+
+void
+thread_unlink(struct thread *td)
+{
+ struct proc *p = td->td_proc;
+ struct ksegrp *kg = td->td_ksegrp;
+
+ TAILQ_REMOVE(&p->p_threads, td, td_plist);
+ p->p_numthreads--;
+ TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
+ kg->kg_numthreads--;
}
/*
Index: sys/kern/sched_4bsd.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/sched_4bsd.c,v
retrieving revision 1.15
diff -u -r1.15 sched_4bsd.c
--- sys/kern/sched_4bsd.c 2003/04/11 03:39:48 1.15
+++ sys/kern/sched_4bsd.c 2003/04/17 06:34:55
@@ -44,6 +44,7 @@
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/ksevar.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
@@ -438,13 +439,13 @@
* run much recently, and to round-robin among other processes.
*/
void
-sched_clock(struct kse *ke)
+sched_clock(struct thread *td)
{
struct ksegrp *kg;
- struct thread *td;
+ struct kse *ke;
- kg = ke->ke_ksegrp;
- td = ke->ke_thread;
+ kg = td->td_ksegrp;
+ ke = td->td_kse;
ke->ke_sched->ske_cpticks++;
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
@@ -487,31 +488,6 @@
}
void
-sched_fork(struct proc *p, struct proc *p1)
-{
- sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
- sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
- sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
-}
-
-void
-sched_fork_kse(struct kse *ke, struct kse *child)
-{
- child->ke_sched->ske_cpticks = 0;
-}
-
-void
-sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
-{
- child->kg_estcpu = kg->kg_estcpu;
-}
-
-void
-sched_fork_thread(struct thread *td, struct thread *child)
-{
-}
-
-void
sched_nice(struct ksegrp *kg, int nice)
{
kg->kg_nice = nice;
@@ -695,7 +671,13 @@
}
fixpt_t
-sched_pctcpu(struct kse *ke)
+sched_pctcpu(struct thread *td)
{
- return (ke->ke_pctcpu);
+ struct kse *ke;
+
+ ke = td->td_kse;
+ if (ke)
+ return (ke->ke_pctcpu);
+ else
+ return ((fixpt_t)0);
}
Index: sys/kern/sched_ule.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/sched_ule.c,v
retrieving revision 1.28
diff -u -r1.28 sched_ule.c
--- sys/kern/sched_ule.c 2003/04/12 22:33:24 1.28
+++ sys/kern/sched_ule.c 2003/04/17 06:34:55
@@ -855,11 +855,11 @@
}
void
-sched_clock(struct kse *ke)
+sched_clock(struct thread *td)
{
struct kseq *kseq;
struct ksegrp *kg;
- struct thread *td;
+ struct kse *ke;
#if 0
struct kse *nke;
#endif
@@ -880,8 +880,8 @@
tickincr = 1;
}
- td = ke->ke_thread;
- kg = ke->ke_ksegrp;
+ ke = td->td_kse;
+ kg = td->td_ksegrp;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((td != NULL), ("schedclock: null thread pointer"));
@@ -894,7 +894,7 @@
if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
sched_pctcpu_update(ke);
- if (td->td_kse->ke_flags & KEF_IDLEKSE)
+ if (td->td_flags & TDF_IDLETD)
return;
CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
Index: sys/kern/subr_trap.c
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/kern/subr_trap.c,v
retrieving revision 1.250
diff -u -r1.250 subr_trap.c
--- sys/kern/subr_trap.c 2003/03/31 22:49:16 1.250
+++ sys/kern/subr_trap.c 2003/04/17 06:34:57
@@ -139,7 +139,6 @@
{
struct thread *td;
struct proc *p;
- struct kse *ke;
struct ksegrp *kg;
struct rlimit *rlim;
u_int prticks, sticks;
@@ -170,7 +169,6 @@
* ast() will be called again.
*/
mtx_lock_spin(&sched_lock);
- ke = td->td_kse;
sticks = td->td_sticks;
flags = td->td_flags;
sflag = p->p_sflag;
Index: sys/sys/proc.h
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/sys/proc.h,v
retrieving revision 1.313
diff -u -r1.313 proc.h
--- sys/sys/proc.h 2003/04/13 21:29:11 1.313
+++ sys/sys/proc.h 2003/04/17 06:35:01
@@ -200,20 +200,6 @@
struct thread;
/*
- * The second structure is the Kernel Schedulable Entity. (KSE)
- * It represents the ability to take a slot in the scheduler queue.
- * As long as this is scheduled, it could continue to run any threads that
- * are assigned to the KSEGRP (see later) until either it runs out
- * of runnable threads of high enough priority, or CPU.
- * It runs on one CPU and is assigned a quantum of time. When a thread is
- * blocked, The KSE continues to run and will search for another thread
- * in a runnable state amongst those it has. It May decide to return to user
- * mode with a new 'empty' thread if there are no runnable threads.
- * Threads are temporarily associated with a KSE for scheduling reasons.
- */
-struct kse;
-
-/*
* The KSEGRP is allocated resources across a number of CPUs.
* (Including a number of CPUxQUANTA. It parcels these QUANTA up among
* its KSEs, each of which should be running in a different CPU.
@@ -233,25 +219,13 @@
* forked process cluster by spawning several KSEGRPs.
*/
struct proc;
-
-/***************
- * In pictures:
- With a single run queue used by all processors:
-
- RUNQ: --->KSE---KSE--... SLEEPQ:[]---THREAD---THREAD---THREAD
- | / []---THREAD
- KSEG---THREAD--THREAD--THREAD []
- []---THREAD---THREAD
-
- (processors run THREADs from the KSEG until they are exhausted or
- the KSEG exhausts its quantum)
-
-With PER-CPU run queues:
-KSEs on the separate run queues directly
-They would be given priorities calculated from the KSEG.
- *
- *****************/
+struct dummy1
+{
+};
+#ifndef _SYS_KSEVAR_H_
+#define KSEDEF dummy1
+#endif
/*
* Kernel runnable context (thread).
@@ -279,8 +253,8 @@
#define td_startzero td_flags
int td_flags; /* (j) TDF_* flags. */
int td_inhibitors; /* (j) Why can not run */
- struct kse *td_last_kse; /* (j) Previous value of td_kse */
- struct kse *td_kse; /* (j) Current KSE if running. */
+ struct KSEDEF *td_last_kse; /* (j) Previous value of td_kse */
+ struct KSEDEF *td_kse; /* (j) Current KSE if running. */
int td_dupfd; /* (k) Ret value from fdopen. XXX */
void *td_wchan; /* (j) Sleep address. */
const char *td_wmesg; /* (j) Reason for sleep. */
@@ -349,6 +323,7 @@
#define TDF_CAN_UNBIND 0x000004 /* Only temporarily bound. */
#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
#define TDF_TIMEOUT 0x000010 /* Timing out during sleep. */
+#define TDF_IDLETD 0x000020 /* This is an idle thread */
#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
#define TDF_CVWAITQ 0x000080 /* Thread is on a cv_waitq (not slpq). */
#define TDF_UPCALLING 0x000100 /* This thread is doing an upcall. */
@@ -418,43 +393,6 @@
} while (0)
/*
- * The schedulable entity that can be given a context to run.
- * A process may have several of these. Probably one per processor
- * but posibly a few more. In this universe they are grouped
- * with a KSEG that contains the priority and niceness
- * for the group.
- */
-struct kse {
- struct proc *ke_proc; /* Associated process. */
- struct ksegrp *ke_ksegrp; /* Associated KSEG. */
- TAILQ_ENTRY(kse) ke_kglist; /* Queue of all KSEs in ke_ksegrp. */
- TAILQ_ENTRY(kse) ke_kgrlist; /* Queue of all KSEs in this state. */
- TAILQ_ENTRY(kse) ke_procq; /* (j) Run queue. */
-
-#define ke_startzero ke_flags
- int ke_flags; /* (j) KEF_* flags. */
- struct thread *ke_thread; /* Active associated thread. */
- fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ke_oncpu; /* (j) Which cpu we are on. */
- char ke_rqindex; /* (j) Run queue index. */
- enum {
- KES_UNUSED = 0x0,
- KES_IDLE,
- KES_ONRUNQ,
- KES_UNQUEUED, /* in transit */
- KES_THREAD /* slaved to thread state */
- } ke_state; /* (j) S* process status. */
-#define ke_endzero ke_dummy
- u_char ke_dummy;
- struct ke_sched *ke_sched; /* Scheduler specific data */
-};
-
-/* flags kept in ke_flags */
-#define KEF_IDLEKSE 0x00004 /* A 'Per CPU idle process'.. has one thread */
-#define KEF_DIDRUN 0x02000 /* KSE actually ran. */
-#define KEF_EXIT 0x04000 /* KSE is being killed. */
-
-/*
* The upcall management structure.
* The upcall is used when returning to userland. If a thread does not have
* an upcall on return to userland the thread exports its context and exits.
@@ -479,8 +417,8 @@
struct ksegrp {
struct proc *kg_proc; /* Process that contains this KSEG. */
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* Queue of KSEGs in kg_proc. */
- TAILQ_HEAD(, kse) kg_kseq; /* (ke_kglist) All KSEs. */
- TAILQ_HEAD(, kse) kg_iq; /* (ke_kgrlist) All idle KSEs. */
+ TAILQ_HEAD(, KSEDEF) kg_kseq; /* (ke_kglist) All KSEs. */
+ TAILQ_HEAD(, KSEDEF) kg_iq; /* (ke_kgrlist) All idle KSEs. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, thread) kg_slpq; /* (td_runq) NONRUNNABLE threads. */
@@ -526,9 +464,6 @@
struct vm_object *p_upages_obj; /* (a) Upages object. */
struct procsig *p_procsig; /* (c) Signal actions, state (CPU). */
- /*struct ksegrp p_ksegrp;
- struct kse p_kse; */
-
/*
* The following don't make too much sense..
* See the td_ or ke_ versions of the same flags
@@ -829,7 +764,6 @@
extern struct proc proc0; /* Process slot for swapper. */
extern struct thread thread0; /* Primary thread in proc0 */
extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0 */
-extern struct kse kse0; /* Primary kse in proc0 */
extern struct vmspace vmspace0; /* VM space for proc0. */
extern int hogticks; /* Limit on kernel cpu hogs. */
extern int nprocs, maxproc; /* Current and max number of procs. */
@@ -880,8 +814,7 @@
void pargs_hold(struct pargs *pa);
void procinit(void);
void threadinit(void);
-void proc_linkup(struct proc *p, struct ksegrp *kg,
- struct kse *ke, struct thread *td);
+void proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td);
void proc_reparent(struct proc *child, struct proc *newparent);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
@@ -912,17 +845,11 @@
struct ksegrp *ksegrp_alloc(void);
void ksegrp_free(struct ksegrp *kg);
void ksegrp_stash(struct ksegrp *kg);
-struct kse *kse_alloc(void);
-void kse_free(struct kse *ke);
-void kse_stash(struct kse *ke);
void cpu_set_upcall(struct thread *td, void *pcb);
void cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku);
void cpu_thread_clean(struct thread *);
void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
-void kse_reassign(struct kse *ke);
-void kse_link(struct kse *ke, struct ksegrp *kg);
-void kse_unlink(struct kse *ke);
void ksegrp_link(struct ksegrp *kg, struct proc *p);
void ksegrp_unlink(struct ksegrp *kg);
void thread_signal_add(struct thread *td, int sig);
@@ -943,6 +870,7 @@
void thread_stash(struct thread *td);
int thread_suspend_check(int how);
void thread_suspend_one(struct thread *td);
+void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
@@ -954,7 +882,7 @@
void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
-void upcall_stash(struct kse_upcall *ke);
+void upcall_stash(struct kse_upcall *ku);
void thread_sanity_check(struct thread *td, char *);
void thread_stopped(struct proc *p);
void thread_switchout(struct thread *td);
Index: sys/sys/sched.h
===================================================================
RCS file: /repos/projects/mirrored/freebsd/src/sys/sys/sched.h,v
retrieving revision 1.4
diff -u -r1.4 sched.h
--- sys/sys/sched.h 2003/04/11 03:39:06 1.4
+++ sys/sys/sched.h 2003/04/17 06:35:01
@@ -39,7 +39,7 @@
* Proc related scheduling hooks.
*/
void sched_exit(struct proc *p, struct proc *child);
-void sched_fork(struct proc *p, struct proc *child);
+void sched_fork(struct thread *td, struct proc *child);
/*
* KSE Groups contain scheduling priority information. They record the
@@ -63,12 +63,15 @@
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
+int sched_thread_fork(struct thread *td1, struct thread *td2);
/*
* KSEs are moved on and off of run queues.
*/
+struct kse;
+
void sched_add(struct kse *ke);
struct kse *sched_choose(void);
-void sched_clock(struct kse *ke);
+void sched_clock(struct thread *td);
void sched_exit_kse(struct kse *ke, struct kse *child);
void sched_fork_kse(struct kse *ke, struct kse *child);
void sched_rem(struct kse *ke);
@@ -76,7 +79,7 @@
/*
* and they use up cpu time.
*/
-fixpt_t sched_pctcpu(struct kse *ke);
+fixpt_t sched_pctcpu(struct thread *td);
/*
* These procedures tell the process data structure allocation code how
@@ -92,4 +95,12 @@
extern struct p_sched *proc0_sched;
extern struct td_sched *thread0_sched;
+
+/* temp hacks */
+int sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td);
+void sched_db_print(struct thread *td);
+void schedinit(void);
+void sched_tdexit(struct thread *td);
+void sched_thr_exit(struct thread *td);
+void sched_destroyproc(struct proc *p, struct ksegrp *kg, struct thread *td);
#endif /* !_SYS_SCHED_H_ */
sys/kern/sched_4bsd_kse.c 0100644 0000000 0000000 00000007676 07647376143 014524 0 ustar root wheel
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/ksevar.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
/* Primary KSE, statically allocated and wired to proc0/thread0 in schedinit(). */
struct kse kse0;
/* UMA zone from which all further KSEs are allocated. */
static uma_zone_t kse_zone;
/* Type-stable initializer run once per zone item (see kse_init below). */
static void kse_init(void *mem, int size);
/*
 * Wire the statically allocated scheduler objects into
 * proc0/ksegrp0/thread0 and create the UMA zone that backs all
 * later KSE allocations.  thread0 is left marked as currently
 * running on kse0.
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	kse_link(&kse0, &ksegrp0);
	/* Attach the scheduler-private extension areas (allocated elsewhere). */
	kse0.ke_sched = kse0_sched;
	ksegrp0.kg_sched = ksegrp0_sched;
	proc0.p_sched = proc0_sched;
	thread0.td_sched = thread0_sched;
	kse0.ke_state = KES_THREAD;	/* slaved to thread0's state */
	kse0.ke_thread = &thread0;
	thread0.td_kse = &kse0;		/* we are running */
	/* kse_init gives each zone item its type-stable ke_sched pointer. */
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL, UMA_ALIGN_CACHE, 0);
}
/*
 * UMA initializer: set up the type-stable parts of a newly created
 * kse.  The scheduler-private area lives immediately after the
 * struct kse itself in the same allocation, so point ke_sched at it.
 */
static void
kse_init(void *mem, int size)
{
	struct kse *newke;

	newke = mem;
	newke->ke_sched = (struct ke_sched *)(newke + 1);
}
/*
 * Link a KSE into a kse group: insert it at the head of the group's
 * KSE list, account for it, and reset its per-KSE linkage fields.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_proc = kg->kg_proc;
	ke->ke_ksegrp = kg;
	ke->ke_state = KES_UNQUEUED;	/* not yet on any queue */
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}
/*
 * Allocate a kse from the type-stable zone.  M_WAITOK: may sleep,
 * never returns NULL in practice.
 */
struct kse *
kse_alloc(void)
{
	struct kse *newke;

	newke = uma_zalloc(kse_zone, M_WAITOK);
	return (newke);
}
/*
 * Deallocate a kse, returning it to the type-stable zone.
 *
 * Fix: the parameter was misleadingly named 'td' (the convention for
 * struct thread pointers); renamed to 'ke' to match the prototype in
 * <sys/ksevar.h> and every other kse_* function here.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}
int
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{
struct kse *ke;
ke = kse_alloc();
if (ke) {
kse_link(ke, kg);
return (0);
}
return (ENOMEM);
}
void
sched_destroyproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{
struct kse *ke;
ke = td->td_kse;
if (ke)
kse_free(ke);
}
/*
 * Remove a KSE from its group (sched_lock must be held).  Takes it
 * off the group's KSE list, and off the idle queue as well if it was
 * idle; if this was the group's last KSE the group itself is
 * unlinked.  The KSE is then stashed for later release.
 */
void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		/* Idle KSEs also sit on the group's idle queue. */
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 * NOTE(review): kse_stash() presumably defers the actual free and
	 * stat aggregation to thread_reap()-style code — confirm.
	 */
	kse_stash(ke);
}
/* Byte span from member 'start' up to (but not including) member 'end'. */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
/*
 * New version of sched_fork(): set up the scheduler state of a newly
 * forked process.  Zeroes the child KSE's startzero range, slaves the
 * KSE to the child's first thread, and inherits the parent group's
 * estcpu.
 *
 * NOTE(review): assumes the caller has already linked the child's
 * first kse/ksegrp/thread (the FIRST_*_IN_PROC lookups rely on it) —
 * confirm against fork1().
 */
void
sched_fork(struct thread *td, struct proc *p2)
{
	struct kse *ke2;
	struct ksegrp *kg2;
	struct thread *td2;

	ke2 = FIRST_KSE_IN_PROC(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	kg2 = FIRST_KSEGRP_IN_PROC(p2);
	bzero(&ke2->ke_startzero,
	    (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
	ke2->ke_state = KES_THREAD;	/* slaved to td2 */
	ke2->ke_thread = td2;
	td2->td_kse = ke2;
	/*ke2->ke_sched->ske_cpticks = 0; */ /* XXX FIX! */
	/* Child group inherits the parent group's CPU-usage estimate. */
	kg2->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
/*
 * Scheduler side of creating an extra thread in a process: allocate a
 * fresh KSE, link it into td2's group, and slave it to td2.  Returns
 * 0 on success or ENOMEM if no KSE could be allocated.
 */
int
sched_thread_fork(struct thread *td1, struct thread *td2)
{
	struct kse *newke;

	/* Initialize our kse structure. */
	newke = kse_alloc();
	if (newke == NULL)
		return (ENOMEM);
	bzero(&newke->ke_startzero,
	    RANGEOF(struct kse, ke_startzero, ke_endzero));
	kse_link(newke, td2->td_ksegrp);
	newke->ke_thread = td2;
	td2->td_kse = newke;
	return (0);
}
/*
 * Detach an exiting thread from its KSE.  If the KSE itself is being
 * killed (KEF_EXIT) unlink it from the group; otherwise hand it back
 * via kse_reassign() so another thread can run on it.
 */
void
sched_tdexit(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	ke->ke_state = KES_UNQUEUED;	/* in transit: no longer slaved */
	ke->ke_thread = NULL;
	if (ke->ke_flags & KEF_EXIT)
		kse_unlink(ke);		/* KSE dies with the thread */
	else
		kse_reassign(ke);	/* group may reuse the KSE */
	td->td_kse = NULL;
	td->td_last_kse = NULL;
}
/*
 * Scheduler side of a thr-style (1:1) thread exit: detach and unlink
 * the thread's KSE, then let the scheduler aggregate its stats into a
 * surviving sibling KSE.
 *
 * Fix: the original evaluated TAILQ_NEXT(ke, ke_kglist) AFTER
 * kse_unlink() had removed 'ke' from the group list and stashed it,
 * reading the linkage of a removed (and possibly recycled) entry.
 * Capture the successor before unlinking.
 */
void
sched_thr_exit(struct thread *td)
{
	struct kse *ke;
	struct kse *nextke;

	ke = td->td_kse;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_thread = NULL;
	/* Record the sibling before 'ke' leaves the group list. */
	nextke = TAILQ_NEXT(ke, ke_kglist);
	kse_unlink(ke);
	sched_exit_kse(nextke, ke);
}
sys/sys/ksevar.h 0100644 0000000 0000000 00000011705 07647367077 013031 0 ustar root wheel /*
* Copyright (C) 2001 Julian Elischer <julian@freebsd.org>
* for the FreeBSD Foundation.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* $FreeBSD: src/sys/sys/kse.h,v 1.12 2003/03/19 05:49:37 davidxu Exp $
*/
#ifndef _SYS_KSEVAR_H_
#define _SYS_KSEVAR_H_
#ifndef _KERNEL
#error "Not for use outside the kernel"
#else
#define KSEDEF kse
/*
* The second structure is the Kernel Schedulable Entity. (KSE)
* It represents the ability to take a slot in the scheduler queue.
* As long as this is scheduled, it could continue to run any threads that
* are assigned to the KSEGRP (see later) until either it runs out
* of runnable threads of high enough priority, or CPU.
 * It runs on one CPU and is assigned a quantum of time. When a thread is
 * blocked, the KSE continues to run and will search for another thread
 * in a runnable state amongst those it has. It may decide to return to user
 * mode with a new 'empty' thread if there are no runnable threads.
* Threads are temporarily associated with a KSE for scheduling reasons.
*/
struct kse;
/***************
* In pictures:
With a single run queue used by all processors:
RUNQ: --->KSE---KSE--... SLEEPQ:[]---THREAD---THREAD---THREAD
| / []---THREAD
KSEG---THREAD--THREAD--THREAD []
[]---THREAD---THREAD
(processors run THREADs from the KSEG until they are exhausted or
the KSEG exhausts its quantum)
With PER-CPU run queues:
KSEs on the separate run queues directly
They would be given priorities calculated from the KSEG.
*
*****************/
/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 *
 * NOTE(review): the (j) locking tags presumably follow the key in
 * <sys/proc.h> (sched_lock) — confirm.
 */
struct kse {
	struct proc	*ke_proc;	/* Associated process. */
	struct ksegrp	*ke_ksegrp;	/* Associated KSEG. */
	TAILQ_ENTRY(kse) ke_kglist;	/* Queue of all KSEs in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* Queue of all KSEs in this state. */
	TAILQ_ENTRY(kse) ke_procq;	/* (j) Run queue. */

#define	ke_startzero ke_flags		/* Start of bzero'd range (see sched_fork). */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_UNUSED = 0x0,
		KES_IDLE,		/* On the group's idle queue (kg_iq). */
		KES_ONRUNQ,
		KES_UNQUEUED,		/* in transit */
		KES_THREAD		/* slaved to thread state */
	} ke_state;			/* (j) S* process status. */
#define	ke_endzero ke_dummy		/* End of bzero'd range. */
	u_char		ke_dummy;
	struct ke_sched	*ke_sched;	/* Scheduler specific data */
};
/* flags kept in ke_flags */
#define	KEF_IDLEKSE	0x00004	/* A 'Per CPU idle process'.. has one thread */
#define	KEF_DIDRUN	0x02000	/* KSE actually ran. */
#define	KEF_EXIT	0x04000	/* KSE is being killed. */

/* Iterate over every KSE in a group. */
#define FOREACH_KSE_IN_GROUP(kg, ke) \
	TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)

/* XXXKSE the lines below should probably only be used in 1:1 code */
#define FIRST_KSE_IN_KSEGRP(kg) TAILQ_FIRST(&kg->kg_kseq)
#define FIRST_KSE_IN_PROC(p) FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))
extern struct kse kse0;		/* Primary kse in proc0 */

/* New in KSE: KSE lifecycle, implemented in kern/sched_4bsd_kse.c. */
struct kse *kse_alloc(void);
void kse_free(struct kse *ke);
void kse_stash(struct kse *ke);
void kse_reassign(struct kse *ke);
void kse_link(struct kse *ke, struct ksegrp *kg);
void kse_unlink(struct kse *ke);
#endif /* !_KERNEL */
#endif /* !_SYS_KSEVAR_H_ */
help
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?Pine.BSF.4.21.0304162233240.94222-101000>
