Date:      Wed, 22 May 2002 22:04:19 -0700 (PDT)
From:      Jonathan Mini <mini@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 11779 for review
Message-ID:  <200205230504.g4N54J914922@freefall.freebsd.org>

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=11779

Change 11779 by mini@mini_stylus on 2002/05/22 22:03:18

	Many whitespace nits and comment fixups.

Affected files ...

... //depot/projects/kse/sys/i386/i386/vm_machdep.c#43 edit
... //depot/projects/kse/sys/i386/include/pcb_ext.h#7 edit
... //depot/projects/kse/sys/kern/init_main.c#41 edit
... //depot/projects/kse/sys/kern/kern_condvar.c#33 edit
... //depot/projects/kse/sys/kern/kern_exec.c#26 edit
... //depot/projects/kse/sys/kern/kern_exit.c#49 edit
... //depot/projects/kse/sys/kern/kern_fork.c#68 edit
... //depot/projects/kse/sys/kern/kern_idle.c#13 edit
... //depot/projects/kse/sys/kern/kern_proc.c#60 edit
... //depot/projects/kse/sys/kern/kern_sig.c#49 edit
... //depot/projects/kse/sys/kern/kern_switch.c#44 edit
... //depot/projects/kse/sys/kern/kern_synch.c#61 edit
... //depot/projects/kse/sys/kern/kern_thread.c#48 edit
... //depot/projects/kse/sys/kern/subr_trap.c#55 edit
... //depot/projects/kse/sys/kern/sys_process.c#26 edit
... //depot/projects/kse/sys/sys/kse.h#7 edit
... //depot/projects/kse/sys/sys/proc.h#101 edit

Differences ...

==== //depot/projects/kse/sys/i386/i386/vm_machdep.c#43 (text+ko) ====

@@ -262,7 +262,7 @@
 	mdp = &td->td_proc->p_md;
 	if (mdp->md_ldt)
 		user_ldt_free(td);
-        reset_dbregs();
+	reset_dbregs();
 }
 
 void
@@ -274,7 +274,7 @@
 #endif
 	if (pcb->pcb_ext != 0) {
 		/* XXXKSE  XXXSMP  not SMP SAFE.. what locks do we have? */
-		/*if (pcb->pcb_ext->refcount-- == 1) ?? */
+		/* if (pcb->pcb_ext->refcount-- == 1) ?? */
 	        /* 
 		 * XXX do we need to move the TSS off the allocated pages 
 		 * before freeing them?  (not done here)
@@ -424,7 +424,7 @@
 	uframe = &tm->ctx.tfrm.tf_tf;
 	error = copyout(frame, uframe, sizeof(*frame));
 	/*
-	 * "What about the fp regs?" I hear you ask.... XXXKSE 
+	 * "What about the fp regs?" I hear you ask.... XXXKSE
 	 * Don't know where gs and "onstack" come from.
 	 * May need to fiddle a few other values too.
 	 */

==== //depot/projects/kse/sys/i386/include/pcb_ext.h#7 (text+ko) ====

@@ -41,7 +41,7 @@
 	struct 	i386tss	ext_tss;	/* per-process i386tss */
 	caddr_t	ext_iomap;		/* i/o permission bitmap */
 	struct	vm86_kernel ext_vm86;	/* vm86 area */
-	int refcount;
+	int	refcount;
 };
 
 #ifdef _KERNEL

==== //depot/projects/kse/sys/kern/init_main.c#41 (text+ko) ====

@@ -338,10 +338,9 @@
 	ke->ke_oncpu = 0;
 	ke->ke_state = KES_RUNNING;
 	ke->ke_thread = td;
-	/* proc_linkup puts it in the idle queue, that's not what we want */
+	/* proc_linkup puts it in the idle queue, that's not what we want. */
 	TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
 	kg->kg_idle_kses--;
-	
 	p->p_peers = 0;
 	p->p_leader = p;
 
@@ -671,7 +670,7 @@
 
 	td = FIRST_THREAD_IN_PROC(initproc);
 	mtx_lock_spin(&sched_lock);
-	setrunqueue(td); /* XXXKSE */
+	setrunqueue(td);	/* XXXKSE */
 	mtx_unlock_spin(&sched_lock);
 }
 SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)

==== //depot/projects/kse/sys/kern/kern_condvar.c#33 (text+ko) ====

@@ -109,32 +109,31 @@
  * Common code for cv_wait* functions.  All require sched_lock.
  */
 
-/* 
- * decide if we need to queue an upcall
- * Copied from msleep().. maybe make this a common function.
+/*
+ * Decide if we need to queue an upcall.
+ * This is copied from msleep(); perhaps this should be a common function.
  */
 static void
 cv_check_upcall(struct thread *td)
 {
 
 	/*
-	 * If we are capable of async syscalls, and there isn't already
-	 * another one ready to return, then start a new thread
-	 * and queue it ready to run. Note there is danger here as we need
-	 * to make sure that we don't sleep getting it
-	 * (recursign might be bad). Hense the TDF_INMSLEEP flag.
+	 * If we are capable of async syscalls and there isn't already
+	 * another one ready to return, start a new thread
+	 * and queue it as ready to run. Note that there is danger here
+	 * because we need to make sure that we don't sleep allocating
+	 * the thread (recursion here might be bad).
+	 * Hence the TDF_INMSLEEP flag.
 	 */
-	if ((td->td_proc->p_flag & P_KSES) &&
-	    td->td_mailbox &&
+	if ((td->td_proc->p_flag & P_KSES) && td->td_mailbox &&
 	    (td->td_flags & TDF_INMSLEEP) == 0) {
-		/* 
-		 * If we have no queued work to do, then
-		 * upcall to the UTS to see if it has more to do.
-		 * We don't need to upcall now, just make it and
-		 * queue it.
+		/*
+		 * If we have no queued work to do,
+		 * upcall to the UTS to see if it has more work.
+		 * We don't need to upcall now, just queue it.
 		 */
 		if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
-			/* don't recurse here! */
+			/* Don't recurse here! */
 			td->td_flags |= TDF_INMSLEEP;
 			thread_schedule_upcall(td, td->td_kse);
 			td->td_flags &= ~TDF_INMSLEEP;
@@ -186,7 +185,7 @@
 	if (sig != 0) {
 		if (td->td_wchan != NULL)
 			cv_waitq_remove(td);
-		td->td_state = TDS_RUNNING; /* XXXKSE */
+		td->td_state = TDS_RUNNING;	/* XXXKSE */
 	} else if (td->td_wchan != NULL) {
 		cv_switch(td);
 	}
@@ -322,7 +321,7 @@
 
 	PROC_LOCK(p);
 	if (sig == 0)
-		sig = CURSIG(td);  /* XXXKSE */
+		sig = CURSIG(td);	/* XXXKSE */
 	if (sig != 0) {
 		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
 			rval = EINTR;
@@ -611,7 +610,7 @@
 		td->td_flags &= ~TDF_TIMEOUT;
 		setrunqueue(td);
 	} else if (td->td_wchan != NULL) {
-		if (td->td_state == TDS_SLP) /* XXXKSE */
+		if (td->td_state == TDS_SLP)	/* XXXKSE */
 			setrunnable(td);
 		else
 			cv_waitq_remove(td);
@@ -621,10 +620,9 @@
 	mtx_unlock_spin(&sched_lock);
 }
 
-/* 
- * For now only abort interruptable waits
- * others will have to either complete on their own
- * or have a timeout.
+/*
+ * For now only abort interruptible waits.
+ * The others will have to either complete on their own or have a timeout.
  */
 void
 cv_abort(struct thread *td)
@@ -634,7 +632,7 @@
 	    td->td_proc->p_pid,
 	    td->td_proc->p_comm);
 	mtx_lock_spin(&sched_lock);
-	if ((td->td_flags & (TDF_SINTR| TDF_TIMEOUT)) == TDF_SINTR) {
+	if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
 		if (td->td_wchan != NULL) {
 			if (td->td_state == TDS_SLP) {
 				setrunnable(td);
@@ -646,4 +644,3 @@
 	mtx_unlock_spin(&sched_lock);
 }
 
-
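The reworked cv_check_upcall() comment spells out the guard around queueing
an upcall: the thread must be in KSE mode, must have a mailbox, must have no
queued work in its ksegrp, and must not already be inside this path.
TDF_INMSLEEP is held across thread_schedule_upcall() so that setting up the
upcall thread cannot recurse back in here.  Purely as an illustration (a
user-space sketch, not kernel code; the flag values and the stub below are
made up), the shape of that guard is:

#include <stdio.h>

/* Illustrative flag values; the real ones live in <sys/proc.h>. */
#define P_KSES		0x0001	/* process is in KSE (threaded) mode */
#define TDF_INMSLEEP	0x0002	/* already inside the upcall-queueing path */

struct thread {
	int	 td_procflags;		/* stands in for td->td_proc->p_flag */
	void	*td_mailbox;		/* non-NULL when a UTS mailbox exists */
	int	 td_flags;
	int	 td_queued_work;	/* stands in for a non-empty kg_runq */
};

/* Stub: the real thread_schedule_upcall() allocates and queues a thread. */
static void
thread_schedule_upcall(struct thread *td)
{
	printf("upcall queued (flags 0x%x)\n", td->td_flags);
}

/*
 * Queue an upcall only when the thread is in KSE mode, has a mailbox,
 * has no queued work, and is not already in this path (TDF_INMSLEEP
 * prevents recursion while the upcall thread is being set up).
 */
static void
check_upcall(struct thread *td)
{
	if ((td->td_procflags & P_KSES) && td->td_mailbox != NULL &&
	    (td->td_flags & TDF_INMSLEEP) == 0) {
		if (td->td_queued_work == 0) {
			td->td_flags |= TDF_INMSLEEP;	/* don't recurse */
			thread_schedule_upcall(td);
			td->td_flags &= ~TDF_INMSLEEP;
		}
	}
}

int
main(void)
{
	struct thread td = { P_KSES, &td, 0, 0 };

	check_upcall(&td);		/* queues an upcall */
	td.td_flags |= TDF_INMSLEEP;
	check_upcall(&td);		/* the guard trips; nothing happens */
	return (0);
}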

==== //depot/projects/kse/sys/kern/kern_exec.c#26 (text+ko) ====

@@ -153,12 +153,12 @@
 	if ((p->p_flag & P_KSES) && thread_single(SNGLE_EXIT)) {
 		PROC_UNLOCK(p);
 		mtx_unlock(&Giant);
-		return (ERESTART); /* try again later */
+		return (ERESTART);	/* Try again later. */
 	}
-	/* If we get here all other threads are dead */
+	/* If we get here all other threads are dead. */
 	p->p_flag |= P_INEXEC;
 	PROC_UNLOCK(p);
-	
+
 	/*
 	 * Initialize part of the common data
 	 */

==== //depot/projects/kse/sys/kern/kern_exit.c#49 (text+ko) ====

@@ -145,43 +145,41 @@
 	PROC_LOCK(p);
 	if (p->p_flag & P_KSES) {
 		/*
-		 * first kill off the other threads. This requires 
+		 * First kill off the other threads. This requires
 		 * Some co-operation from other parts of the kernel
 		 * so it may not be instant.
-		 */ 
+		 */
 		thread_suspend_check(0);
 		/*
 		 * Here is a trick..
 		 * We need to free up our KSE to process other threads
-		 * so we can safely set the UNBOUND flag,
-		 * whether or not we have a mailbox, as we are NEVER
-		 * going to return to the user anyhow.
-		 * The flag will not already be set if we are exiting
-		 * because of a signal or a pagefault or similar.
-		 *  (or even an exit(2) from the UTS)
+		 * so that we can safely set the UNBOUND flag
+		 * (whether or not we have a mailbox) as we are NEVER
+		 * going to return to the user.
+		 * The flag will not be set yet if we are exiting
+		 * because of a signal, pagefault, or similar
+		 * (or even an exit(2) from the UTS).
 		 */
 		td->td_flags |= TDF_UNBOUND;
 		if (thread_single(SNGLE_EXIT)) {
-			/* Can't happen ?? Maybe it  can XXXKSE*/
+			/* This can't happen.. maybe it can XXXKSE */
 			panic ("Exit: Single threading fouled up");
 		}
-		/* All other activity in this process is now stopped */
-		/* remove excess KSEs and KSEGRPS */
+		/* All other activity in this process is now stopped. */
+		/* Remove excess KSEs and KSEGRPS. */
 		/* ... */
-		/* turn off threading support */
+		/* Turn off threading support. */
 		p->p_flag &= ~P_KSES;
 	}
 	/*
 	 * With this state set:
-	 * Any thread entering  the kernel from userspace
-	 * will thread_exit() in trap().
-	 * Any thread attempting to sleep will return immediatly
+	 * Any thread entering the kernel from userspace will thread_exit()
+	 * in trap().  Any thread attempting to sleep will return immediately
 	 * with EINTR or EWOULDBLOCK, which will hopefully force them
-	 * to back out to userland, freeing resources as they go,
-	 * and anything attempting to return to userland will
-	 * thread_exit() from userret().
-	 * thread_exit() will do a wakeup on p->p_numthreads if
-	 * it transitions to 1. Well it's done in thread_unlink() but...
+	 * to back out to userland, freeing resources as they go, and
+	 * anything attempting to return to userland will thread_exit()
+	 * from userret().  thread_exit() will do a wakeup on p->p_numthreads
+	 * if it transitions to 1.
 	 */
 
 	p->p_flag |= P_WEXIT;
@@ -698,7 +696,7 @@
 				p->p_procsig = NULL;
 			}
 
-			/* Free the KSE spare thread */
+			/* Free the KSE spare thread. */
 			if (p->p_kse.ke_tdspare != NULL)
 				thread_free(p->p_kse.ke_tdspare);
 

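The rewritten exit1() comments describe a fixed ordering for tearing down a
threaded process: mark the exiting thread's KSE UNBOUND (we are never going
back to userland), force the process single-threaded with
thread_single(SNGLE_EXIT), turn off P_KSES, and only then raise P_WEXIT so
that any straggler entering the kernel exits via trap()/userret().  A
compressed user-space model of that ordering (the constants and the
thread_single() stub below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define P_KSES		0x01
#define P_WEXIT		0x02
#define TDF_UNBOUND	0x04
#define SNGLE_EXIT	1	/* illustrative; the real value is in <sys/proc.h> */

struct proc   { int p_flag; int p_numthreads; };
struct thread { struct proc *td_proc; int td_flags; };

/* Stub: the real thread_single() waits for the other threads to die. */
static int
thread_single(struct thread *td, int mode)
{
	(void)mode;
	td->td_proc->p_numthreads = 1;
	return (0);		/* 0 == we are now the only thread */
}

/*
 * Model of the start of exit1(): the KSE is made UNBOUND first, the
 * process is forced single-threaded, threading support is switched off,
 * and P_WEXIT is set last so late arrivals from userspace see it.
 */
static void
exit1_threads(struct thread *td)
{
	struct proc *p = td->td_proc;

	if (p->p_flag & P_KSES) {
		td->td_flags |= TDF_UNBOUND;
		if (thread_single(td, SNGLE_EXIT) != 0)
			abort();	/* "Exit: Single threading fouled up" */
		p->p_flag &= ~P_KSES;
	}
	p->p_flag |= P_WEXIT;
	printf("single-threaded; p_flag is now 0x%x\n", p->p_flag);
}

int
main(void)
{
	struct proc p = { P_KSES, 3 };
	struct thread td = { &p, 0 };

	exit1_threads(&td);
	return (0);
}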
==== //depot/projects/kse/sys/kern/kern_fork.c#68 (text+ko) ====

@@ -287,11 +287,10 @@
 		 * need to either be suspended or in the kernel,
 		 * where they will try restart in the parent and will
 		 * be aborted in the child.
-		 * (it is possible we could restart them there as well!)
-		 */ 
+		 */
 		PROC_LOCK(p1);
 		if (thread_single(SNGLE_WAIT)) {
-			/* abort.. someone else is single threading before us */
+			/* Abort.. someone else is single threading before us */
 			PROC_UNLOCK(p1);
 			return (ERESTART);
 		}
@@ -501,20 +500,20 @@
 	 * XXXKSE Theoretically only the running thread would get copied 
 	 * Others in the kernel would be 'aborted' in the child.
 	 * i.e return E*something*
-	 * On SMP we would have to stop them running on 
+	 * On SMP we would have to stop them running on
 	 * other CPUs! (set a flag in the proc that stops
 	 * all returns to userland until completed)
 	 * This is wrong but ok for 1:1.
 	 */
 	proc_linkup(p2, kg2, ke2, td2);
 
-	/* Set up the thread as an active thread. As if runnable */
+	/* Set up the thread as an active thread (as if runnable). */
 	ke2->ke_thread = td2;
 	td2->td_kse = ke2;
 	TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
 	ke2->ke_state = KES_UNQUEUED;
 	kg2->kg_idle_kses--;
-	td2->td_flags &= ~TDF_UNBOUND; /* for the rest of this sycall */
+	td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
 
 	/* note.. XXXKSE no pcb or u-area yet */
 
@@ -833,7 +832,7 @@
 
 	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
 	p->p_state = PRS_NORMAL;
-	td->td_state = TDS_RUNNING; /* already done in switch() on 386 */
+	td->td_state = TDS_RUNNING; /* Already done in switch() on 386. */
 	/*
 	 * Finish setting up thread glue.  We need to initialize
 	 * the thread into a td_critnest=1 state.  Some platforms

==== //depot/projects/kse/sys/kern/kern_idle.c#13 (text+ko) ====

@@ -60,7 +60,7 @@
 			panic("idle_setup: kthread_create error %d\n", error);
 
 		p->p_flag |= P_NOLOAD;
-		FIRST_THREAD_IN_PROC(p)->td_state = TDS_RUNQ; /* XXXKSE */
+		FIRST_THREAD_IN_PROC(p)->td_state = TDS_RUNQ;	/* XXXKSE */
 #ifdef SMP
 	}
 #endif
@@ -104,7 +104,7 @@
 
 		mtx_lock_spin(&sched_lock);
 		curthread->td_proc->p_stats->p_ru.ru_nvcsw++;
-		curthread->td_state = TDS_RUNQ; /*pretend we are on the runq */
+		curthread->td_state = TDS_RUNQ; /* Pretend we are on the runq */
 		mi_switch();
 		mtx_unlock_spin(&sched_lock);
 	}

==== //depot/projects/kse/sys/kern/kern_proc.c#60 (text+ko) ====

@@ -208,38 +208,27 @@
 	p = td->td_proc;
 	PROC_LOCK(p);
 	/*
-	 * If we have no KSE mode set, just set it,
-	 * and skip KSE and KSEGRP creation.
-	 * You cannot request a new group with the first one
-	 * as you are effectively getting one.
-	 * go directly to saving the upcall info.
+	 * If we have no KSE mode set, just set it, and skip KSE and KSEGRP
+	 * creation.  You cannot request a new group with the first one as
+	 * you are effectively getting one. Instead, go directly to saving
+	 * the upcall info.
 	 */
 	if (td->td_proc->p_flag & P_KSES) {
 
 		return (EINVAL);	/* XXX */
 		/*
-		 * If newgroup then create the new group. 
+		 * If newgroup then create the new group.
 		 * Check we have the resources for this.
 		 */
-
-		/*
-		 * Copy lots of fields from the current KSEGRP.
-		 */
-
+		/* Copy lots of fields from the current KSEGRP.  */
+		/* Create the new KSE */
+		/* Copy lots of fields from the current KSE.  */
+	} else {
 		/*
-		 * Create the new KSE
-		 */
-
-		/*
-		 * Copy lots of fields from the current KSE.
-		 */
-
-	} else {
-		/* 
 		 * We are switching to KSEs so just
 		 * use the preallocated ones for this call.
 		 * XXXKSE if we have to initialise any fields for KSE
-		 * mode operation, then do it here.
+		 * mode operation, do it here.
 		 */
 		newkse = td->td_kse;
 	}
@@ -248,24 +237,25 @@
 	 */
 	PROC_UNLOCK(p);
 	mtx_lock_spin(&sched_lock);
-	mi_switch();	/* save current registers to PCB */
+	mi_switch();	/* Save current registers to PCB. */
 	mtx_unlock_spin(&sched_lock);
 	PROC_LOCK(p);
 	cpu_save_upcall(td, newkse);
 	newkse->ke_mailbox = uap->mbx;
 	PROC_UNLOCK(p);
-	td->td_retval[0] = 1;	/* note that we are the returning syscall */
-	td->td_retval[1] = 0;	/* note that we are the returning syscall */
+	/* Note that we are the returning syscall */
+	td->td_retval[0] = 1;
+	td->td_retval[1] = 0;
 
 	if (td->td_proc->p_flag & P_KSES) {
 		thread_schedule_upcall(td, newkse);
 	} else {
 		/*
-		 * Don't set this til we are truely
-		 * ready because things will start acting differently.
-		 * Return to the upcall code for the first time.
-		 * Assuming we set up the mailboxes right,
-		 * all syscalls after this will be asynchronous.
+		 * Don't set this until we are truly ready, because
+		 * things will start acting differently.  Return to the
+		 * upcall code for the first time.  Assuming we set up
+		 * the mailboxes right, all syscalls after this will be
+		 * asynchronous.
 		 */
 		td->td_proc->p_flag |= P_KSES;
 	}
@@ -729,14 +719,14 @@
 	if (!(p->p_flag & P_KSES)) {
 		if (td->td_wmesg != NULL)
 			strncpy(kp->ki_wmesg, td->td_wmesg,
-				sizeof(kp->ki_wmesg) - 1);
+			    sizeof(kp->ki_wmesg) - 1);
 		if (td->td_state == TDS_MTX) {
 			kp->ki_kiflag |= KI_MTXBLOCK;
 			strncpy(kp->ki_mtxname, td->td_mtxname,
 			    sizeof(kp->ki_mtxname) - 1);
 		}
 	}
-	kp->ki_stat = p->p_state; /* XXXKSE Doesn't MAP */
+	kp->ki_stat = p->p_state;	/* XXXKSE Doesn't MAP */
 	kp->ki_sflag = p->p_sflag;
 	kp->ki_swtime = p->p_swtime;
 	kp->ki_traceflag = p->p_traceflag;

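The kse_new() hunks show the handshake with the UTS: mi_switch() spills the
caller's registers to the PCB, cpu_save_upcall() captures them for later
upcalls, the user mailbox is recorded in the KSE, and td_retval[] is set so
userland can tell the original syscall return apart from a fresh upcall.
The very first call simply turns on P_KSES instead of scheduling an upcall.
A condensed sketch of that tail (stubs only; p_flag is folded into the
thread structure here for brevity, which the real code does not do):

#include <stdio.h>

#define P_KSES	0x01

struct kse    { void *ke_mailbox; };
struct thread { int td_retval[2]; struct kse *td_kse; int p_flag; };

/* Stubs for the scheduler and machine-dependent pieces. */
static void mi_switch_save(void) { /* registers -> PCB */ }
static void cpu_save_upcall(struct thread *td, struct kse *ke) { (void)td; (void)ke; }
static void thread_schedule_upcall(struct thread *td, struct kse *ke) { (void)td; (void)ke; }

/*
 * Model of the tail of kse_new(): save the caller's context, record the
 * user mailbox, and mark the return values so the UTS can recognize the
 * returning syscall.  The first call only switches the process into KSE
 * mode; later calls (once supported) schedule an upcall instead.
 */
static void
kse_new_tail(struct thread *td, void *user_mbx)
{
	struct kse *ke = td->td_kse;

	mi_switch_save();		/* mi_switch(): registers -> PCB */
	cpu_save_upcall(td, ke);
	ke->ke_mailbox = user_mbx;
	td->td_retval[0] = 1;		/* we are the returning syscall */
	td->td_retval[1] = 0;
	if (td->p_flag & P_KSES)
		thread_schedule_upcall(td, ke);
	else
		td->p_flag |= P_KSES;	/* first call: switch to KSE mode */
}

int
main(void)
{
	struct kse ke = { NULL };
	struct thread td = { { 0, 0 }, &ke, 0 };
	int mbx;			/* stands in for the user mailbox */

	kse_new_tail(&td, &mbx);
	printf("retval %d/%d, P_KSES %s\n", td.td_retval[0], td.td_retval[1],
	    (td.p_flag & P_KSES) ? "on" : "off");
	return (0);
}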
==== //depot/projects/kse/sys/kern/kern_sig.c#49 (text+ko) ====

@@ -1310,29 +1310,29 @@
 	/*
 	 * Some signals have a process-wide effect and a per-thread
 	 * component.  Most processing occurs when the process next
-	 * tries to cross the user boundary, however there is
-	 * sometimes something that needs to be done immediatly, such as
-	 * waking up the threads so that it can cross the user boundary.
+	 * tries to cross the user boundary; however, there are some
+	 * times when processing needs to be done immediately, such as
+	 * waking up threads so that they can cross the user boundary.
 	 * We try do the per-process part here.
 	 */
 	if (P_SHOULDSTOP(p)) {
 		/*
-		 * The process is in stopped mode. All the threads should be 
-		 * either winding down, or already on the suspended queue.
+		 * The process is in stopped mode. All the threads should be
+		 * either winding down or already on the suspended queue.
 		 */
 		if (p->p_flag & P_TRACED) {
 			/*
 			 * The traced process is already stopped,
 			 * so no further action is necessary.
-			 * No signal can restart us,
+			 * No signal can restart us.
 			 */
 			goto out;
 		}
 
 		if (sig == SIGKILL) {
 			/*
-			 * Kill signal always sets processes running.
-			 * They actually die elsewhere.
+			 * SIGKILL sets the process running.
+			 * It will die elsewhere.
 			 * All threads must be restarted.
 			 */
 			p->p_flag &= ~P_STOPPED;
@@ -1342,15 +1342,12 @@
 		if (prop & SA_CONT) {
 			/*
 			 * If SIGCONT is default (or ignored), we continue the
-			 * process but don't leave the signal in p_siglist, as
-			 * it has no further action.
-			 * If SIGCONT is held, we continue
-			 * the process and leave the signal in
-			 * p_siglist.
-			 * If the process catches SIGCONT, let it
-			 * handle the signal itself.
-			 * If it isn't waiting on
-			 * an event, then it goes back to run state.
+			 * process but don't leave the signal in p_siglist as
+			 * it has no further action.  If SIGCONT is held, we
+			 * continue the process and leave the signal in
+			 * p_siglist.  If the process catches SIGCONT, let it
+			 * handle the signal itself.  If it isn't waiting on
+			 * an event, it goes back to run state.
 			 * Otherwise, process goes back to sleep state.
 			 */
 			p->p_flag &= ~P_STOPPED_SGNL;
@@ -1363,7 +1360,7 @@
 				 * It would seem that the answer would be to
 				 * run an upcall in the next KSE to run, and
 				 * deliver the signal that way. In a NON KSE
-				 * process, we need to make sure that the 
+				 * process, we need to make sure that the
 				 * single thread is runnable asap.
 				 * XXXKSE for now however, make them all run.
 				 */
@@ -1373,15 +1370,15 @@
 			 * The signal is not ignored or caught.
 			 */
 			mtx_lock_spin(&sched_lock);
-			thread_unsuspend(p);	 /* checks if should do it */
+			thread_unsuspend(p);	/* Checks if should do it. */
 			mtx_unlock_spin(&sched_lock);
 			goto out;
 		}
 
 		if (prop & SA_STOP) {
 			/*
-			 * Already stopped, don't need to stop again.
-			 * (If we did the shell could get confused.)
+			 * Already stopped, don't need to stop again
+			 * (If we did the shell could get confused).
 			 */
 			SIGDELSET(p->p_siglist, sig);
 			goto out;
@@ -1389,9 +1386,9 @@
 
 		/*
 		 * All other kinds of signals:
-		 * If a thread is sleeping interruptibly, then simulate a
-		 * wakeup so that when it is continued, it will be made
-		 * runnable and can look at the signal.  But don't make
+		 * If a thread is sleeping interruptibly, simulate a
+		 * wakeup so that when it is continued it will be made
+		 * runnable and can look at the signal.  However, don't make
 		 * the process runnable, leave it stopped.
 		 * It may run a bit until it hits a thread_suspend_check().
 		 *
@@ -1447,24 +1444,18 @@
 			stop(p);
 			mtx_unlock_spin(&sched_lock);
 			goto out;
-		} else {
+		} else
 			goto allrunfast;
-		}
 		/* NOTREACHED */
 	} else {
-		/* not in "NORMAL" state. discard the signal.  */
+		/* Not in "NORMAL" state, discard the signal. */
 		SIGDELSET(p->p_siglist, sig);
 		goto out;
 	}
 
 	/*
-	 * The process is not stopped so we need to apply the signal to all the 
+	 * The process is not stopped so we need to apply the signal to all the
 	 * running threads.
-	 * XXXKSE
-	 * For now there is one thread per proc.
-	 * We may not want to do if for each thread,
-	 * but we don't yet know when we do and when we don't.
-	 * We'll fix it later.
 	 */
 
 allrunfast:
@@ -1478,7 +1469,7 @@
 	mtx_assert(&sched_lock, MA_NOTOWNED);
 }
 
-/* 
+/*
  * The force of a signal has been directed against a single
  * thread. We need to see what we can do about knocking it
  * out of any sleep it may be in etc.
@@ -1492,7 +1483,7 @@
 	prop = sigprop(sig);
 
 	/*
-	 * bring the priority of a process up if we want it to get 
+	 * Bring the priority of a process up if we want it to get
 	 * killed in this lifetime.
 	 * XXXKSE we should shift the priority to the thread.
 	 */
@@ -1524,9 +1515,9 @@
 			goto out;
 		}
 		/*
-		 * Process is sleeping and traced... make it runnable
+		 * Process is sleeping and traced.  Make it runnable
 		 * so it can discover the signal in issignal() and stop
-		 * for the parent.
+		 * for its parent.
 		 */
 		if (p->p_flag & P_TRACED) {
 			p->p_flag &= ~P_STOPPED_TRACE;

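The reworked psignal() comments spell out what happens when a signal reaches
a process that is already stopped: SIGKILL restarts every thread (the
process dies elsewhere), SIGCONT continues the process and stays in
p_siglist only if it is held, a further stop signal is simply discarded, and
anything else only simulates a wakeup of interruptibly sleeping threads
while the process stays stopped.  A compact user-space model of that
dispatch (the SA_* property bits below are stand-ins for sigprop(), not the
kernel's values):

#include <signal.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's sigprop() bits. */
#define SA_CONT	0x1
#define SA_STOP	0x2

/*
 * Model of psignal()'s "process is already stopped" branch, following the
 * rewritten comments.
 */
static const char *
stopped_signal(int sig, int prop, int held)
{
	if (sig == SIGKILL)
		return ("restart all threads; the process dies elsewhere");
	if (prop & SA_CONT)
		return (held ? "continue, keep the signal in p_siglist"
		    : "continue, drop the signal");
	if (prop & SA_STOP)
		return ("already stopped: discard the signal");
	return ("wake sleeping threads, leave the process stopped");
}

int
main(void)
{
	printf("SIGKILL: %s\n", stopped_signal(SIGKILL, 0, 0));
	printf("SIGCONT: %s\n", stopped_signal(SIGCONT, SA_CONT, 0));
	printf("SIGTSTP: %s\n", stopped_signal(SIGTSTP, SA_STOP, 0));
	printf("SIGINT:  %s\n", stopped_signal(SIGINT, 0, 0));
	return (0);
}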
==== //depot/projects/kse/sys/kern/kern_switch.c#44 (text+ko) ====

@@ -48,9 +48,9 @@
  ************************************************************************/
 
 /*
- * Select the KSE that will be run next.
- * from that find the thread, and remove it from the KSEGRP's run queue.
- * If there is thread clustering, this will be what does it.
+ * Select the KSE that will be run next.  From that find the thread, and
+ * remove it from the KSEGRP's run queue.  If there is thread clustering,
+ * this will be what does it.
  */
 struct thread *
 choosethread(void)
@@ -70,7 +70,7 @@
 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
 		    td, td->td_priority);
 	} else {
-		/* pretend the idle thread was on the run queue */
+		/* Pretend the idle thread was on the run queue. */
 		td = PCPU_GET(idlethread);
 		td->td_state = TDS_RUNQ;
 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
@@ -79,8 +79,8 @@
 }
 
 /*
- * Given a KSE (now surplus), either assign a new runable thread to it,
- * and put it in the run queue, or put it in the ksegrp's idle KSE list.
+ * Given a KSE (now surplus), either assign a new runable thread to it
+ * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
  */
 void
 kse_reassign(struct kse *ke)
@@ -91,7 +91,7 @@
 
 	ke->ke_state = KES_IDLE; /* temp state */
 	if ((td = kg->kg_last_assigned)) {
-		/* If there is a 'last assigned' then see what's next */
+		/* If there is a 'last assigned' then see what's next. */
 		td = TAILQ_NEXT(td, td_runq);
 	} else {
 		td = TAILQ_FIRST(&kg->kg_runq);
@@ -101,7 +101,7 @@
 		kg->kg_last_assigned = td;
 		td->td_kse = ke;
 		ke->ke_thread = td;
-		runq_add(&runq, ke); 
+		runq_add(&runq, ke);
 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
 	} else {
 		ke->ke_state = KES_IDLE;
@@ -148,7 +148,7 @@
 	return runq_check(&runq);
 }
 
-/* 
+/*
  * Remove a thread from it's KSEGRP's run queue.
  * This in turn may remove it from a KSE if it was already assigned
  * to one, possibly causing a new thread to be assigned to the KSE
@@ -163,19 +163,20 @@
 
 	if (td->td_state != TDS_RUNQ) {
 		panic("remrunqueue: Bad state on run queue");
+		/* NOTREACHED */
 		return;
 	}
 	kg = td->td_ksegrp;
 	ke = td->td_kse;
 	/*
-	 *  If it's a bound thread/KSE pair, tka the shortcut. All non-KSE
+	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
 	 * threads are BOUND.
 	 */
 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
 	td->td_state = TDS_UNQUEUED;
 	kg->kg_runnable--;
 	if ((td->td_flags & TDF_UNBOUND) == 0)  {
-		/* bring it's kse with it */
+		/* Bring its kse with it. */
 		runq_remove(&runq, ke);
 		ke->ke_state = KES_UNQUEUED; 
 		return;
@@ -183,16 +184,16 @@
 	if (ke) {
 		/*
 		 * This thread has been assigned to a KSE.
-		 * so we need to dissociate it and try assign the 
-		 * KSE to the next available thread, then we should
+		 * We need to dissociate it and try to assign the
+		 * KSE to the next available thread. Then, we should
 		 * see if we need to move the KSE in the run queues.
 		 */
 		td2 = kg->kg_last_assigned;
 		if ((td3 = TAILQ_NEXT(td2, td_runq))) {
 			/*
-			 * give the next unassigned thread to the KSE
-			 * so the number of runnable kSEs remains
-			 * constant
+			 * Give the next unassigned thread to the KSE
+			 * so the number of runnable KSEs remains
+			 * constant.
 			 */
 			td3->td_kse = ke;
 			ke->ke_thread = td3;
@@ -201,8 +202,8 @@
 		} else {
 			/*
 			 * There is no unassigned thread.
-			 * If we were the last assigned one
-			 * then adjust the last assigned pointer back
+			 * If we were the last assigned one,
+			 * adjust the last assigned pointer back
 			 * one, which may result in NULL.
 			 */
 			if (td == td2) {
@@ -215,7 +216,7 @@
 				kg->kg_last_assigned = TAILQ_PREV(td,
 				    threadlist_head, td_runq);
 			}
-			ke->ke_state = KES_IDLE; 
+			ke->ke_state = KES_IDLE;
 			TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
 			kg->kg_idle_kses++;
 		}
@@ -243,8 +244,8 @@
 		runq_add(&runq, td->td_kse);
 		return;
 	}
-	/* 
-	 * first add the thread to the ksegrp's run queue at
+	/*
+	 * First add the thread to the ksegrp's run queue at
 	 * the appropriate place.
 	 */
 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
@@ -258,15 +259,15 @@
 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
 	}
 
-	/* 
+	/*
 	 * The following could be achieved by simply doing:
 	 * td->td_kse = NULL; kse_reassign(ke);
 	 * but I felt that I'd try do it inline here.
 	 * All this work may not be worth it.
 	 */
 	if ((ke = td->td_kse)) { /* XXXKSE */
-		/* 
-		 * We have a KSE already, see if we can keep it,
+		/*
+		 * We have a KSE already. See whether we can keep it
 		 * or if we need to give it to someone else.
 		 * Either way it will need to be inserted into
 		 * the runq. kse_reassign() will do this as will runq_add().
@@ -283,7 +284,7 @@
 			return;
 
 		}
-		/* 
+		/*
 		 * Give it to the correct thread,
 		 * which may be (often is) us, but may not be.
 		 */
@@ -291,16 +292,16 @@
 		kse_reassign(ke);
 		return;
 	}
-	/* 
+	/*
 	 * There are two cases where KSE adjustment is needed.
 	 * Usurpation of an already assigned KSE, and assignment
 	 * of a previously IDLE KSE.
 	 */
 	if (kg->kg_idle_kses) {
-		/* 
+		/*
 		 * If there are unassigned KSEs then we definitly
-		 * will be assigned one from the idle KSE list. Then
-		 * if we are the last, we should get the "last
+		 * will be assigned one from the idle KSE list.
+		 * If we are the last, we should get the "last
 		 * assigned" pointer set to us as well.
 		 */
 		ke = TAILQ_FIRST(&kg->kg_iq);
@@ -316,14 +317,12 @@
 	} else if (kg->kg_last_assigned &&
 		(kg->kg_last_assigned->td_priority > td->td_priority)) {
 		/*
-		 * If there was NO last-assigned then all the KSEs
-		 * are actually out running as we speak,
-		 * If there is a last assigned, but we didn't see it
-		 * Then we must be inserting before it,
-		 * So take the KSE from the last assigned,
-		 * and back it up one entry. Then assign the KSE to
-		 * the new thread and adjust it's priority.
-		 * so do nothing.
+		 * If there were none last-assigned, all KSEs
+		 * are actually out running as we speak.
+		 * If there was a last assigned, but we didn't see it,
+		 * we must be inserting before it, so take the KSE from
+		 * the last assigned, and back it up one entry. Then,
+		 * assign the KSE to the new thread and adjust its priority.
 		 */
 		td2 = kg->kg_last_assigned;
 		ke = td2->td_kse;

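The kse_reassign() and remrunqueue() comments describe the invariant being
maintained here: a surplus KSE is either handed to the next thread past
kg_last_assigned (and goes back onto the system run queue) or is parked on
the ksegrp's idle list, so the number of runnable KSEs stays constant.  A
toy model of that decision, with an array standing in for the TAILQ run
queue (illustrative only):

#include <stdio.h>

/*
 * A toy ksegrp: threads[] is the priority-ordered kg_runq (NULL
 * terminated) and last_assigned is the index of the last thread that
 * already has a KSE (-1 when none do).
 */
struct ksegrp {
	const char *threads[4];
	int	    last_assigned;
	int	    idle_kses;
};

/*
 * Model of kse_reassign(): if a runnable thread beyond kg_last_assigned
 * exists, give it the KSE and advance the pointer (the KSE would then be
 * runq_add()ed); otherwise the KSE joins the ksegrp's idle list.
 */
static void
kse_reassign(struct ksegrp *kg)
{
	int next = kg->last_assigned + 1;

	if (kg->threads[next] != NULL) {
		kg->last_assigned = next;
		printf("KSE assigned to %s and re-queued\n",
		    kg->threads[next]);
	} else {
		kg->idle_kses++;
		printf("no unassigned thread: KSE idled (%d idle)\n",
		    kg->idle_kses);
	}
}

int
main(void)
{
	struct ksegrp kg = { { "td1", "td2", NULL, NULL }, 0, 0 };

	kse_reassign(&kg);	/* td2 gets the surplus KSE */
	kse_reassign(&kg);	/* nothing left: the KSE goes idle */
	return (0);
}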
==== //depot/projects/kse/sys/kern/kern_synch.c#61 (text+ko) ====

@@ -441,11 +441,12 @@
 	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
 	    ("sleeping without a mutex"));
 	/*
-	 * If we are capable of async syscalls, and there isn't already
-	 * another one ready to return, then start a new thread
-	 * and queue it ready to run. Note there is danger here as we need
-	 * to make sure that we don't sleep getting it
-	 * (recursign might be bad). Hense the TDF_INMSLEEP flag.
+	 * If we are capable of async syscalls and there isn't already
+	 * another one ready to return, start a new thread
+	 * and queue it as ready to run. Note that there is danger here
+	 * because we need to make sure that we don't sleep allocating
+	 * the thread (recursion here might be bad).
+	 * Hence the TDF_INMSLEEP flag.
 	 */
 	if (p->p_flag & P_KSES) {
 		/* Just don't bother if we are exiting
@@ -453,14 +454,14 @@
 		if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
 			return (EINTR);
 		if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
-			/* 
+			/*
 			 * If we have no queued work to do, then
 			 * upcall to the UTS to see if it has more to do.
 			 * We don't need to upcall now, just make it and
 			 * queue it.
 			 */
 			if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
-				/* don't recurse here! */
+				/* Don't recurse here! */
 				mtx_lock_spin(&sched_lock);
 				td->td_flags |= TDF_INMSLEEP;
 				thread_schedule_upcall(td, td->td_kse);
@@ -628,20 +629,19 @@
 }
 
 /*
- * Abort a thread, as if an interrupt had occured.
- * Only abort interruptable waits (unfortunatly it 
- * is only safe to abort these.
- * This is about identical to cv_abort(). 
+ * Abort a thread, as if an interrupt had occurred.  Only abort
+ * interruptible waits (unfortunately it is only safe to abort these).
+ * This is about identical to cv_abort().
  * Think about merging them?
- * Also whatever the signal code does...
+ * Also, whatever the signal code does...
  */
 void
-abortsleep(struct thread *td) 
+abortsleep(struct thread *td)
 {
 
 	mtx_lock_spin(&sched_lock);
 	/*
-	 * If the TDF_TIMEOUT flag is set, then just leave. A
+	 * If the TDF_TIMEOUT flag is set, just leave. A
 	 * timeout is scheduled anyhow.
 	 */
 	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
@@ -649,7 +649,10 @@
 			if (td->td_state == TDS_SLP) {  /* XXXKSE */
 				setrunnable(td);
 			} else {
-			/* probably in a suspended state.. um.. dunno XXXKSE */
+				/*
+				 * Probably in a suspended state..
+				 * um.. dunno XXXKSE
+				 */
 				unsleep(td);
 			}
 		}
@@ -842,24 +845,22 @@
 	ke->ke_oncpu = NOCPU;
 	ke->ke_flags &= ~KEF_NEEDRESCHED;
 	/*
-	 * At the last moment, if this KSE is not on the run queue
-	 * then it needs to be freed correctly, and the thread
-	 * treated accordingly.
+	 * At the last moment: if this KSE is not on the run queue,
+	 * it needs to be freed correctly and the thread treated accordingly.
 	 */
 	if (td->td_state == TDS_RUNNING) {
-		/* put us back on the run queue (kse and all) */
+		/* Put us back on the run queue (kse and all). */
 		setrunqueue(td);
 	} else if ((td->td_flags & TDF_UNBOUND) && (td->td_state != TDS_RUNQ)) {
 		/*
-		 * We will not be on the run queue
-		 * someone else can use the KSE if they need it.
+		 * We will not be on the run queue.
+		 * Someone else can use the KSE if they need it.
 		 */
 		td->td_kse = NULL;
 		kse_reassign(ke);
 	}
 	cpu_switch();
 	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
-	/* td->td_state = TDS_RUNNING; */ /* already done in switch on 386 */
 	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,

==== //depot/projects/kse/sys/kern/kern_thread.c#48 (text+ko) ====

@@ -51,10 +51,8 @@
 #include <vm/uma.h>
 #include <vm/vm_map.h>
 
-/*static MALLOC_DEFINE(M_PROC, "proc", "Proc structures"); */
-
 /*
- * Thread related storage
+ * Thread related storage.
  */
 static uma_zone_t thread_zone;
 static int allocated_threads;
@@ -246,6 +244,9 @@
 	uma_zfree(thread_zone, td);
 }
 
+/*
+ * Store the thread context in the UTS's mailbox.
+ */
 int
 thread_export_context(struct thread *td)
 {
@@ -255,14 +256,14 @@
 	void *addr2;
 	int error;
 
-	/* Export the register contents.  */
+	/* Export the register contents. */
 	error = cpu_export_context(td);
 
 	addr1 = (caddr_t)ke->ke_mailbox
 			+ offsetof(struct kse_mailbox, completed_threads);
 	addr2 = (caddr_t)td->td_mailbox
 			+ offsetof(struct thread_mailbox , next_completed);
-	/* Then link it into it's KSE's list of completed threads */
+	/* Then link it into its KSE's list of completed threads. */
 	if (!error)
 		error = copyin( addr1, &td2_mbx, sizeof(void *));
 	if (!error)
@@ -308,8 +309,10 @@
 }
 
 /*
- * Note that we do not link to the proc's ucred here
- * The thread is linked as if running but no KSE assigned
+ * Link a thread to a process.
+ *
+ * Note that we do not link to the proc's ucred here.
+ * The thread is linked as if running but no KSE assigned.
  */
 void
 thread_link(struct thread *td, struct ksegrp *kg)
@@ -331,7 +334,7 @@
 }
 

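thread_export_context(), now documented as storing the thread context in
the UTS's mailbox, links the thread's mailbox onto the KSE mailbox's
completed_threads list entirely with copyin()/copyout() at
offsetof()-computed user addresses.  A user-space model of that head
insertion (memcpy() stands in for the user-space copies, and the mailbox
layouts below are cut down, not the real <sys/kse.h> ones):

#include <stdio.h>
#include <string.h>

/* Cut-down mailboxes; the real layouts are declared in <sys/kse.h>. */
struct thread_mailbox {
	const char	      *name;
	struct thread_mailbox *next_completed;
};
struct kse_mailbox {
	struct thread_mailbox *completed_threads;
};

/* Stand-in for copyin()/copyout(): moves one pointer-sized word. */
static int
copy_word(void *dst, const void *src)
{
	memcpy(dst, src, sizeof(void *));
	return (0);
}

/*
 * Model of the list splice in thread_export_context(): read the current
 * head of completed_threads, store it as our next_completed, then write
 * ourselves back as the new head.
 */
static int
export_context(struct kse_mailbox *ke_mbx, struct thread_mailbox *td_mbx)
{
	struct thread_mailbox *old_head;
	int error;

	error = copy_word(&old_head, &ke_mbx->completed_threads);
	if (!error)
		error = copy_word(&td_mbx->next_completed, &old_head);
	if (!error)
		error = copy_word(&ke_mbx->completed_threads, &td_mbx);
	return (error);
}

int
main(void)
{
	struct kse_mailbox km = { NULL };
	struct thread_mailbox t1 = { "t1", NULL }, t2 = { "t2", NULL };
	struct thread_mailbox *p;

	export_context(&km, &t1);
	export_context(&km, &t2);
	for (p = km.completed_threads; p != NULL; p = p->next_completed)
		printf("%s\n", p->name);	/* prints t2 then t1 */
	return (0);
}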
>>> TRUNCATED FOR MAIL (1000 lines) <<<
