Date:      Sat, 12 Jun 2004 01:32:37 GMT
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 54668 for review
Message-ID:  <200406120132.i5C1WbUm013871@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=54668

Change 54668 by julian@julian_ref on 2004/06/12 01:32:19

	slight cleanups

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_kse.c#13 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#13 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_kse.c#13 (text+ko) ====

@@ -919,7 +919,6 @@
 	td2->td_inhibitors = 0;
 	SIGFILLSET(td2->td_sigmask);
 	SIG_CANTMASK(td2->td_sigmask);
-	sched_fork_thread(td, td2);
 	return (td2);	/* bogus.. should be a void function */
 }
 

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#13 (text+ko) ====

@@ -61,7 +61,9 @@
 #include <vm/uma.h>
 #include <machine/critical.h>
 
-/*
+/********************************************************************
+ *     Definitions of the "KSE" structure.
+ *
  * This is a scheduler-private structure that the scheduler uses (for now)
  * to implement the thread fairness algorithm.
  * The threads are made runnable by the rest of the system, but 
@@ -116,8 +118,9 @@
 #define	KEF_EXIT	0x00002	/* KSE is being killed. */
 #define	KEF_DIDRUN	0x00004	/* KSE actually ran. */
 
-/*
+/***************************************************************
  * Scheduler private extensions to thread, ksegrp and proc structures.
+ *     
  * These are invisible outside the scheduler.
  * They are usually allocated beyond the end of the proc, thread or ksegrp
  * structure and always accessed via an indirection. 
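
(Aside on the layout described above: the scheduler-private part lives past
the end of the public structure and is reached through a pointer such as
kg->kg_sched, which appears further down in this diff.  A stand-alone sketch
of the idea follows; the names are invented for illustration and are not the
real nsched identifiers.)

	#include <stdlib.h>

	struct foo_sched {			/* private part, invisible outside the scheduler */
		int	fs_slices;
	};

	struct foo {				/* public part seen by the rest of the system */
		int	f_flags;
		struct foo_sched *f_sched;	/* indirection to the private part */
	};

	struct foo *
	foo_alloc(void)
	{
		/* One allocation: the public struct followed by the private extension. */
		struct foo *f = malloc(sizeof(*f) + sizeof(struct foo_sched));

		if (f == NULL)
			return (NULL);
		f->f_sched = (struct foo_sched *)(f + 1);	/* point past the end */
		f->f_sched->fs_slices = 0;
		return (f);
	}
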
@@ -160,16 +163,19 @@
 #define	FIRST_KSE_IN_KSEGRP(kg)	TAILQ_FIRST(&(kg)->kg_kseq)
 #define	FIRST_KSE_IN_PROC(p)	FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))
 
-void	kse_free(struct kse *ke);
-void	kse_stash(struct kse *ke);
-void	kse_reassign(struct kse *ke);
-void	sched_fork_kse(struct thread *parenttd, struct kse *newke);
-void	sched_unrun_kse(struct proc *parent, struct thread *childtd);
+
+/****************************************************************
+ * function prototypes 
+ */
+static void	kse_free(struct kse *ke);
+static void	kse_stash(struct kse *ke);
+static void	kse_reassign(struct kse *ke);
 static struct kse * kse_alloc(void);
 static void	kse_link(struct kse *ke, struct ksegrp *kg);
 static void	kse_unlink(struct kse *ke);
 
 #define KTR_4BSD	0x0
+#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
 
 /*
  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
@@ -579,10 +585,6 @@
 	sched_tdcnt++;
 }
 
-
-
-
-
 /*******************************************************************
  * These functions represent entrypoints that a scheduler needs to *
  * Supply in order to give the rest of the system opportunities to *
@@ -673,18 +675,10 @@
 	sched_set_concurrancy(td->td_ksegrp, 0);
 }
 
-/*
- * XXX make sure that for the last thread we 
- * leave it linked to its kse..
- */
 void
 sched_thread_exit(struct thread *td)
 {
 	struct kse *ke;
-#if 0
-	struct ksegrp *kg;
-	struct proc *p;
-#endif
 
 	ke = td->td_kse;
 
@@ -696,18 +690,6 @@
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt--;
 	
-#if 0 /* need to account for kg_idleq etc. */
-	p = td->td_proc;
-	if ((p->p_numthreads == 1) &&
-	    ((kg = td->td_ksegrp)->kg_numthreads == 1)) {
-		/* We are the last thread/kseg.. */
-		proc_linkup(p, kg, td);
-		kse_link(ke, kg);
-		ke.ke_thread = td;
-		td.td_kse = ke; /* we are running */
-		ke.ke_state = KES_THREAD;
-	}
-#endif
 }
 
 /* 
@@ -732,35 +714,26 @@
 
 
 void
-sched_fork(struct thread *parenttd, struct proc *child)
+sched_fork(struct thread *td, struct proc *child)
 {
-	struct thread *td2;
-	struct kse *ke2;
+	struct thread *newtd;
+	struct kse *newke;
 
-	td2 = FIRST_THREAD_IN_PROC(child);
-	ke2 = FIRST_KSE_IN_PROC(child);
-	sched_fork_kse(parenttd, ke2);
-	sched_fork_ksegrp(parenttd, FIRST_KSEGRP_IN_PROC(child));
-        ke2->ke_thread = td2;
-	td2->td_kse = ke2;
-	/* sched_fork_thread(parenttd, td2);*/
+	newtd = FIRST_THREAD_IN_PROC(child);
+	newke = FIRST_KSE_IN_PROC(child);
+	bzero(&newke->ke_startzero,
+		(unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
+	newke->ke_state = KES_THREAD;
+	newke->ke_cpticks = 0;
+	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(child));
+        newke->ke_thread = newtd;
+	newtd->td_kse = newke;
 }
 
 void
-sched_fork_ksegrp(struct thread *parent, struct ksegrp *child)
+sched_fork_ksegrp(struct thread *td, struct ksegrp *newkg)
 {
-	struct ksegrp *kg;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	kg =  parent->td_ksegrp;
-	child->kg_estcpu = kg->kg_estcpu;
-}
-
-
-
-void
-sched_fork_thread(struct thread *parent, struct thread *child)
-{
+	newkg->kg_estcpu = td->td_ksegrp->kg_estcpu;
 }
 
 void
@@ -1110,37 +1083,36 @@
  * for now have special thr code
  * later on, clean these up into common code.
  */
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
 int
-sched_thr_newthread(struct thread *td, struct thread *td0, int flags)
+sched_thr_newthread(struct thread *td, struct thread *newtd, int flags)
 {
-	struct kse *ke0;
+	struct kse *newke;
 	/* Initialize our kse structure. */
-	ke0 = kse_alloc();
-	bzero(&ke0->ke_startzero,
+	newke = kse_alloc();
+	bzero(&newke->ke_startzero,
 	    RANGEOF(struct kse, ke_startzero, ke_endzero));
 
 	/* Link the thread and kse into the ksegrp and make it runnable. */
 	mtx_lock_spin(&sched_lock);
 
-	thread_link(td0, td->td_ksegrp);
-	kse_link(ke0, td->td_ksegrp);
+	thread_link(newtd, td->td_ksegrp);
+	kse_link(newke, td->td_ksegrp);
 
 	/* Bind this thread and kse together. */
-	td0->td_kse = ke0;
-	ke0->ke_thread = td0;
+	newtd->td_kse = newke;
+	newke->ke_thread = newtd;
+	bzero(&newke->ke_startzero,
+		(unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
+	newke->ke_state = KES_THREAD;
+	newke->ke_cpticks = 0;
 
-	sched_fork_kse(td, td->td_kse);
-	sched_fork_thread(td, td0);
-
-	TD_SET_CAN_RUN(td0);
+	TD_SET_CAN_RUN(newtd);
 	if ((flags & THR_SUSPENDED) == 0)
-		setrunqueue(td0);
+		setrunqueue(newtd);
 
 	mtx_unlock_spin(&sched_lock);
 	return (0);	/* the API could fail but not in this case */
 }
-#undef RANGEOF
 
 /*
  * Allocate a kse.
@@ -1248,53 +1220,6 @@
 	kse_stash(ke);
 }
 
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
-/* new version of sched-fork() */
-void
-sched_fork_kse(struct thread *parenttd, struct kse *ke2)
-{
-	struct ksegrp *kg2;
-
-        kg2 = ke2->ke_ksegrp;
-        bzero(&ke2->ke_startzero,
-            (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
-        ke2->ke_state = KES_THREAD;
-        ke2->ke_cpticks = 0;
-        kg2->kg_estcpu = parenttd->td_ksegrp->kg_estcpu;
-}
-
-/*
- * Try handle the generic case where there may be > 1 kseg even
- * though that should never happen. It should be cheap to do so.
- */
-void
-sched_destroyproc(struct proc *p)
-{
-	struct ksegrp *kg;
-	struct kse *ke;
-
-	/* remove all the kses we can find and free them */
-	FOREACH_KSEGRP_IN_PROC(p, kg) {
-		while (!TAILQ_EMPTY(&kg->kg_kseq)) {
-			ke = TAILQ_FIRST(&kg->kg_kseq);
-			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
-			if (ke->ke_thread) 
-				ke->ke_thread->td_kse = NULL;
-			kse_free(ke);
-		}
-	}
-}
-
-/* 
- * The current KSE is being dumped. Clear stuff and
- * look to see if another thread needs us.
- */
-void
-sched_unrun_kse(struct proc *parent, struct thread *childtd)
-{
-}
-
 /* 
  * Whenever we have idle KSEs and there are too many for the concurrancy,
  * then free as many as we can. Don't free too many if we have threads
@@ -1303,12 +1228,34 @@
 #define REDUCE_KSES(kg, skg) 					\
 do {								\
 	while ((skg->skg_concurrancy < skg->skg_kses) &&	\
-    	(skg->skg_idle_kses > 0) &&				\
-	(skg->skg_kses > kg->kg_numthreads)) {			\
+    	    (skg->skg_idle_kses > 0) &&				\
+	    (skg->skg_kses > kg->kg_numthreads)) {			\
 		kse_unlink(TAILQ_FIRST(&skg->skg_iq));		\
 	}							\
 } while (0)
+
+/*
+ * Called by the uma process fini routine..
+ * undo anything we may have done in the uma_init method.
+ * Panic if it's not all 1:1:1:1
+ */
+void
+sched_destroyproc(struct proc *p)
+{
+	struct ksegrp *kg;
+	struct kg_sched *skg;
+	
+	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
+	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
+
+	kg = FIRST_KSEGRP_IN_PROC(p);
 	
+	KASSERT((kg->kg_kses == 1), ("Cached proc with > 1 kse "));
+
+	skg = kg->kg_sched;
+	kse_unlink(TAILQ_FIRST(&skg->skg_iq));
+}
+
 void
 sched_set_concurrancy(struct ksegrp *kg, int concurrancy)
 {
@@ -1330,7 +1277,10 @@
 #endif
 		mtx_lock_spin(&sched_lock);
 		kse_link(newke, kg);
-		sched_fork_kse(curthread, newke);
+		bzero(&newke->ke_startzero,
+		    (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
+		newke->ke_state = KES_THREAD;
+		newke->ke_cpticks = 0;
 		/* Add engine */
 		kse_reassign(newke);
 		mtx_unlock_spin(&sched_lock);


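A note on the RANGEOF() idiom that replaces the removed sched_fork_kse():
RANGEOF(type, start, end) is offsetof(type, end) - offsetof(type, start),
i.e. the number of bytes between two marker members, so one bzero() call
clears every field laid out between ke_startzero and ke_endzero in struct
kse.  A minimal user-space sketch of the idiom; "struct example" and its
members are invented for illustration and are not the real kse fields:

	#include <stddef.h>
	#include <stdio.h>
	#include <strings.h>

	#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	/*
	 * Everything declared between x_startzero and x_endzero is cleared
	 * in one call, like the ke_startzero/ke_endzero markers in struct kse.
	 */
	struct example {
		int	x_keep;		/* not cleared */
		int	x_startzero;	/* first member to clear (marker) */
		int	x_ticks;
		long	x_runtime;
		int	x_endzero;	/* first member left untouched (marker) */
	};

	int
	main(void)
	{
		struct example x = { 1, 2, 3, 4, 5 };

		bzero(&x.x_startzero,
		    RANGEOF(struct example, x_startzero, x_endzero));
		printf("%d %d %d %ld %d\n", x.x_keep, x.x_startzero,
		    x.x_ticks, x.x_runtime, x.x_endzero);	/* 1 0 0 0 5 */
		return (0);
	}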
