Date:      Mon, 31 May 2004 13:18:14 -0700 (PDT)
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 53890 for review
Message-ID:  <200405312018.i4VKIEOn030681@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=53890

Change 53890 by julian@julian_ref on 2004/05/31 13:17:54

	 More work on thread cleanup, etc.

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_thr.c#6 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#17 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#10 edit
.. //depot/projects/nsched/sys/sys/sched.h#5 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_thr.c#6 (text+ko) ====

@@ -79,11 +79,13 @@
 	/* Clean up cpu resources. */
 	cpu_thread_exit(td);
 
+	/* Let the scheduler know we are dying. */
+	/* Much in common with sched_thread_exit(); merge them one day. */
+	sched_thr_exit(td);
+
 	/* Unlink the thread from the process and kseg. */
 	thread_unlink(td);
 
-	sched_thr_exit(td);
-
 	/*
 	 * If we were stopped while waiting for all threads to exit and this
 	 * is the last thread wakeup the exiting thread.
@@ -98,7 +100,6 @@
 	td->td_proc = NULL;
 #endif
 	td->td_ksegrp = NULL;
-	sched_exit_thread(p->p_pptr, td);
 	thread_stash(td);
 
 	cpu_throw(td, choosethread(SW_VOL));
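
For orientation, a minimal sketch of how the tail of thr_exit() reads once the
hunks above are applied (reconstructed from the diff; unrelated bookkeeping is
elided and no new identifiers are introduced):

	/* Clean up cpu resources. */
	cpu_thread_exit(td);

	/* The scheduler drops its per-thread state while td is still linked. */
	sched_thr_exit(td);

	/* Only then detach the thread from the process and kseg. */
	thread_unlink(td);

	/* ... wakeup of the exiting thread elided ... */

	td->td_ksegrp = NULL;
	thread_stash(td);
	cpu_throw(td, choosethread(SW_VOL));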

==== //depot/projects/nsched/sys/kern/kern_thread.c#17 (text+ko) ====

@@ -60,8 +60,6 @@
 
 #include <machine/frame.h>
 
-/* extern void kse_initialise(void);*/
-extern void kse_GC(void);
 /*
  * KSEGRP related storage.
  */
@@ -559,11 +557,11 @@
 	p = td->td_proc;
 
 	mtx_assert(&sched_lock, MA_OWNED);
+	mtx_assert(&Giant, MA_NOTOWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	KASSERT(p != NULL, ("thread exiting without a process"));
 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
-	PROC_LOCK_ASSERT(p, MA_OWNED);
 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
-	mtx_assert(&Giant, MA_NOTOWNED);
 
 	if (td->td_standin != NULL) {
 		thread_stash(td->td_standin);
@@ -572,6 +570,10 @@
 
 	/* drop FPU & debug register state */
 	cpu_thread_exit(td);	/* XXXSMP */
+
+	/* The thread is exiting; the scheduler can release its per-thread state. */
+	sched_thread_exit(td);
+
 	/*
 	 * The last thread is left attached to the process
 	 * So that the whole bundle gets recycled. Skip
@@ -580,6 +582,11 @@
 	if (p->p_flag & P_HADTHREADS) {
 		if (p->p_numthreads > 1) {
 			thread_unlink(td);
+
+			/*
+			 * As we are exiting, there is room for another
+			 * thread to be created.
+			 */
 			if (p->p_maxthrwaits)
 				wakeup(&p->p_numthreads);
 			/*
@@ -607,13 +614,17 @@
 				upcall_remove(td);
 
 			if (kg->kg_numthreads == 0) {
-				/* This kseg is kaput */
+				/*
+				 * This kseg is kaput.  First let the
+				 * scheduler free anything it may have
+				 * assigned to it, then let it do its
+				 * accounting.  Merge these eventually.
+				 */
 				sched_set_concurrancy(kg, 0);
                         	sched_exit_ksegrp(p, td); /* XXX fix */
 				ksegrp_unlink(kg);
 			}
 			
-			sched_exit_thread(td->td_proc->p_pptr, td);
 			td->td_state	= TDS_INACTIVE;
 	#if 0
 			td->td_proc	= NULL;
@@ -621,13 +632,10 @@
 			td->td_ksegrp	= NULL;
 			PCPU_SET(deadthread, td);
 		} else {
-			if (p->p_numthreads == 1 ) {
-				sched_set_concurrancy(kg, 1);
-			}
+			sched_set_concurrancy(kg, 1);
 		}
 	}
 	PROC_UNLOCK(p);
-	mtx_assert(&sched_lock, MA_OWNED);
 	cpu_throw(td, choosethread(SW_VOL));
 	panic("I'm a teapot!");
 	/* NOTREACHED */
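
Likewise, a sketch of the ordering thread_exit() ends up with after these hunks
(again reconstructed from the diff, with most of the function elided):

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Drop FPU & debug register state. */
	cpu_thread_exit(td);			/* XXXSMP */

	/* The thread is exiting; the scheduler releases its per-thread state. */
	sched_thread_exit(td);

	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			/* ... upcall removal and ksegrp teardown as above ... */
		} else {
			/* Last thread: collapse back to a concurrency of 1. */
			sched_set_concurrancy(kg, 1);
		}
	}
	PROC_UNLOCK(p);
	cpu_throw(td, choosethread(SW_VOL));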

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#10 (text+ko) ====

@@ -165,6 +165,9 @@
 void	kse_reassign(struct kse *ke);
 void	sched_fork_kse(struct thread *parenttd, struct kse *newke);
 void	sched_unrun_kse(struct proc *parent, struct thread *childtd);
+static struct kse * kse_alloc(void);
+static void	kse_link(struct kse *ke, struct ksegrp *kg);
+static void	kse_unlink(struct kse *ke);
 
 #define KTR_4BSD	0x0
 
@@ -648,10 +651,16 @@
  * aggregated all the estcpu into the 'built-in' ksegrp.
  */
 void
-sched_exit(struct proc *parent, struct thread *childtd)
+sched_exit(struct proc *parent, struct thread *td)
 {
-	sched_exit_ksegrp(parent, childtd);
-	sched_exit_thread(parent, childtd);
+	struct ksegrp *kg;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+
+	kg = FIRST_KSEGRP_IN_PROC(parent);  /* XXXKSE */ 
+	td->td_kse->ke_flags |= KEF_EXIT;
+	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu +
+			    td->td_ksegrp->kg_estcpu);
 }
 
 /*
@@ -661,21 +670,67 @@
 void
 sched_exit_ksegrp(struct proc *parent, struct thread *child)
 {
+	
+}
+
+/*
+ * XXX: make sure that for the last thread we
+ * leave it linked to its kse.
+ */
+void
+sched_thread_exit(struct thread *td)
+{
+	struct kse *ke;
+#if 0
 	struct ksegrp *kg;
+	struct proc *p;
+#endif
 
-	mtx_assert(&sched_lock, MA_OWNED);
-	kg = FIRST_KSEGRP_IN_PROC(parent);  /* XXXKSE */ 
-	child->td_kse->ke_flags |= KEF_EXIT;
-	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->td_ksegrp->kg_estcpu);
+	ke = td->td_kse;
+
+	if ((td->td_proc->p_flag & P_SA) && ke != NULL) {
+		ke->ke_thread = NULL;
+		td->td_kse = NULL;
+		kse_reassign(ke);
+	}
+	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+		sched_tdcnt--;
+	
+#if 0 /* need to account for kg_idleq etc. */
+	p = td->td_proc;
+	if ((p->p_numthreads == 1) &&
+	    ((kg = td->td_ksegrp)->kg_numthreads == 1)) {
+		/* We are the last thread/kseg.. */
+		proc_linkup(p, kg, td);
+		kse_link(ke, kg);
+		ke->ke_thread = td;
+		td->td_kse = ke; /* we are running */
+		ke->ke_state = KES_THREAD;
+	}
+#endif
 }
 
+/*
+ * Special version of the above for thr(2);
+ * work towards merging them.
+ */
 void
-sched_exit_thread(struct proc *parent, struct thread *childtd)
+sched_thr_exit(struct thread *td)
 {
-	if ((childtd->td_proc->p_flag & P_NOLOAD) == 0)
+	struct kse *ke;
+
+	ke = td->td_kse;
+
+	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt--;
+
+	/* td is about to be freed, but keep it clean */
+	td->td_kse	= NULL;
+	td->td_last_kse	= NULL;
+	kse_unlink(ke); 	/* also frees it */
 }
 
+
 void
 sched_fork(struct thread *parenttd, struct proc *child)
 {
@@ -1003,9 +1058,6 @@
 static struct kg_sched kg_sched0;
 static struct td_sched td_sched0;
 
-static struct kse * kse_alloc(void);
-static void	kse_link(struct kse *ke, struct ksegrp *kg);
-static void	kse_unlink(struct kse *ke);
 
 extern struct mtx kse_zombie_lock;
 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
@@ -1090,21 +1142,6 @@
 }
 #undef RANGEOF
 
-void
-sched_thr_exit(struct thread *td)
-{
-	struct kse *ke;
-
-	ke = td->td_kse;
-
-	ke->ke_state = KES_UNQUEUED;
-	ke->ke_thread = NULL;
-	kse_unlink(ke);
-	sched_unrun_kse(td->td_proc->p_pptr, td);
-	td->td_kse = NULL;
-	td->td_last_kse = NULL;
-}
-
 /*
  * Allocate a kse.
  */
@@ -1256,28 +1293,6 @@
 void
 sched_unrun_kse(struct proc *parent, struct thread *childtd)
 {
-		struct ksegrp *kg;
-		struct kse *ke;
-
-		kg = childtd->td_ksegrp;
-		ke = childtd->td_kse;
-		KASSERT((ke),("unexpected null KSE ptr in sched_unrun_kse()"));
-		ke->ke_state = KES_UNQUEUED;
-		ke->ke_thread = NULL;
-		/*
-		 * Decide what to do with the KSE attached to this thread.
-		 */
-		if (ke->ke_flags & KEF_EXIT) {
-			kse_unlink(ke);
-			if (kg->kg_kses == 0) {
-				sched_exit_ksegrp(parent, childtd); /* XXXKSE */
-				ksegrp_unlink(kg);
-			}
-		} else {
-			kse_reassign(ke);
-		}
-		childtd->td_kse	= NULL;
-		childtd->td_last_kse	= NULL;
 }
 
 /* 
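
To summarize the reshuffle in sched_4bsd.c, here is a sketch of how the
exit-related hooks divide the work after this change (names are those in the
hunks above, bodies reduced to comments):

	void
	sched_exit(struct proc *parent, struct thread *td)
	{
		/* Fold the exiting group's estcpu into the parent's first
		 * ksegrp and flag the thread's kse with KEF_EXIT. */
	}

	void
	sched_thread_exit(struct thread *td)
	{
		/* Per-thread teardown called from thread_exit(): for P_SA
		 * processes hand the kse back via kse_reassign(), and drop
		 * sched_tdcnt unless P_NOLOAD is set. */
	}

	void
	sched_thr_exit(struct thread *td)
	{
		/* thr(2) variant: likewise drop sched_tdcnt, then clear td's
		 * kse pointers and free the kse outright via kse_unlink().
		 * To be merged with sched_thread_exit() eventually. */
	}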

==== //depot/projects/nsched/sys/sys/sched.h#5 (text+ko) ====

@@ -47,6 +47,7 @@
  * Supply the thread that is running and the other process.
  */
 void	sched_exit(struct proc *parent, struct thread *child);
+void	sched_thread_exit(struct thread *td);
 void	sched_fork(struct thread *td, struct proc *child);
 
 /*


