Date: Wed, 17 Nov 2004 10:27:21 GMT
From: Julian Elischer <julian@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 65317 for review
Message-ID: <200411171027.iAHARL6l052778@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=65317

Change 65317 by julian@julian_ref on 2004/11/17 10:27:14

	Small hack to cache fully functional threads. Not tested.
	Some stuff is definitely out of place: KSE-specific code
	shows up outside the kse file.

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_kse.c#35 edit
.. //depot/projects/nsched/sys/kern/kern_synch.c#19 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#46 edit
.. //depot/projects/nsched/sys/sys/proc.h#41 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_kse.c#35 (text+ko) ====

@@ -1022,25 +1022,83 @@
  * from the schedlock as it has a mutex op itself.
  * XXX BUG.. we need to get the cr ref after the thread has
  * checked and changed its own, not 6 months before...
+ *
+ * If we already have some cached, fully running threads, use them
+ * in preference as they need no setup.
  */
 void
 thread_alloc_spare(struct thread *td)
 {
 	struct thread *spare;
+	struct ksegrp *kg;
+
 	if (td->td_standin)
 		return;
-	spare = thread_alloc();
+	kg = td->td_ksegrp;
+	spare = TAILQ_FIRST(&kg->kg_ghostq);
+	if (spare) {
+		TAILQ_REMOVE(&kg->kg_ghostq, spare, td_runq);
+		kg->kg_numghosts--;
+	} else {
+		spare = thread_alloc();
+		bzero(&spare->td_startzero,
+		    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
+		spare->td_proc = td->td_proc;
+		spare->td_ucred = crhold(td->td_ucred);
+	}
 	td->td_standin = spare;
-	bzero(&spare->td_startzero,
-	    (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
-	spare->td_proc = td->td_proc;
-	spare->td_ucred = crhold(td->td_ucred);
+}
+
+/*
+ * Divert exiting threads to be held in a ksegrp cache of
+ * threads that are already in thread_userret().
+ * If they are run, they will exit unless they have an upcall
+ * (see thread_userret()).
+ *
+ * Called from:
+ *  thread_userret()
+ */
+void
+thread_hold(struct thread *td)
+{
+	struct ksegrp *kg = td->td_ksegrp;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	TAILQ_INSERT_HEAD(&kg->kg_ghostq, td, td_runq);
+	kg->kg_numghosts++;
+	thread_unlink(td);
+	KASSERT((kg->kg_numthreads != 0), ("thread_hold: Cached last thread."));
+	mi_switch(0, NULL);
+}
+
+/*
+ * Because we are exiting the ksegrp,
+ * we should let all the cached threads run so that they can
+ * suicide.
+ *
+ * Called from:
+ *  thread_exit()
+ */
+void
+thread_unhold_all(struct ksegrp *kg)
+{
+	struct thread *td;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	while ((td = TAILQ_FIRST(&kg->kg_ghostq))) {
+		TAILQ_REMOVE(&kg->kg_ghostq, td, td_runq);
+		kg->kg_numghosts--;
+		setrunnable(td);
+		thread_link(td, kg);
+	}
 }
 
 /*
  * Create a thread and schedule it for upcall on the KSE given.
  * Use our thread's standin so that we don't have to allocate one.
+ * If the standin is a ghost then just resuscitate it and use it.
  */
 struct thread *
 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
@@ -1064,6 +1122,15 @@
 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
 	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
 	/*
+	 * If it's a ghost we are already all set up to run;
+	 * just give it the upcall and release it.
+	 */
+	if (td2->td_state == TDS_GHOST) {
+		td2->td_upcall = ku;
+		thread_link(td2, td->td_ksegrp);
+		return (td2);
+	}
+	/*
 	 * Bzero already done in thread_alloc_spare() because we can't
 	 * do the crhold here because we are in schedlock already.
 	 */
@@ -1315,9 +1382,12 @@
 		wakeup(&kg->kg_completed);
 		mtx_lock_spin(&sched_lock);
 		thread_stopped(p);
-		thread_exit();
-		/* NOTREACHED */
-	}
+		thread_hold(td);	/* thread effectively stops running */
+		if ((ku = td->td_upcall) == NULL)
+			thread_exit();
+		td->td_pflags |= TDP_UPCALLING;
+		mtx_unlock_spin(&sched_lock);
+	}
 	KASSERT(ku != NULL, ("upcall is NULL"));
 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
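The pattern above is a cache-first free list: take a fully set-up item off a
per-group queue if one exists, otherwise allocate and initialize from scratch.
A minimal userland sketch of the same idea, assuming only sys/queue.h; the
names obj, objcache and numcached are illustrative, not from the kernel source:

	#include <sys/queue.h>
	#include <stdlib.h>

	struct obj {
		TAILQ_ENTRY(obj) o_link;	/* cache linkage, like td_runq */
		int o_ready;			/* state that survives caching */
	};

	static TAILQ_HEAD(, obj) objcache = TAILQ_HEAD_INITIALIZER(objcache);
	static int numcached;			/* analogue of kg_numghosts */

	/* Cache-first allocation: reuse a parked object when possible. */
	static struct obj *
	obj_alloc(void)
	{
		struct obj *o;

		if ((o = TAILQ_FIRST(&objcache)) != NULL) {
			/* Cached: already initialized, no setup needed. */
			TAILQ_REMOVE(&objcache, o, o_link);
			numcached--;
			return (o);
		}
		/* Fresh: zeroed, analogous to the bzero() above. */
		return (calloc(1, sizeof(*o)));
	}

	/* Park an object on the cache instead of destroying it. */
	static void
	obj_hold(struct obj *o)
	{
		TAILQ_INSERT_HEAD(&objcache, o, o_link);
		numcached++;
	}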
==== //depot/projects/nsched/sys/kern/kern_synch.c#19 (text+ko) ====

@@ -374,6 +374,12 @@
 	case TDS_RUNNING:
 	case TDS_RUNQ:
 		return;
+	case TDS_GHOST:
+		if (td->td_inhibitors == 0) {
+			td->td_state = TDS_CAN_RUN;
+			break;
+		} else
+			td->td_state = TDS_INHIBITED;
 	case TDS_INHIBITED:
 		/*
 		 * If we are only inhibited because we are swapped out

==== //depot/projects/nsched/sys/kern/kern_thread.c#46 (text+ko) ====

@@ -557,6 +557,12 @@
 	 * takes the short path.
 	 */
 	if (p->p_flag & P_HADTHREADS) {
+		/*
+		 * If only one thread remains, make sure we free any cached
+		 * ghosts, which may make numthreads > 1 for a moment.
+		 */
+		if (p->p_numthreads > 1)
+			thread_unhold_all(kg);
 		if (p->p_numthreads > 1) {
 			thread_unlink(td);
@@ -596,7 +602,11 @@
 	/*
 	 * If the thread we unlinked above was the last one,
 	 * then this ksegrp should go away too,
+	 * but first let any cached "ghost" threads suicide.
+	 * This may temporarily increase the number of threads.
 	 */
+	if (kg->kg_numthreads == 0)
+		thread_unhold_all(kg);
 	if (kg->kg_numthreads == 0) {
 		/*
 		 * let the scheduler know about this in case
@@ -614,6 +624,7 @@
 			 * This is probably not fair so think of
 			 * a better answer.
 			 */
+			thread_unhold_all(kg);
 			sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
 			sched_set_concurrency(kg, 0); /* XXX TEMP */
 			ksegrp_unlink(kg);
@@ -687,6 +698,7 @@
  *  proc_linkup()
  *  thread_schedule_upcall()
  *  thr_create()
+ *  thread_unhold_all()
  */
 void
 thread_link(struct thread *td, struct ksegrp *kg)
@@ -710,6 +722,7 @@

 /*
  * Convert a process with one thread to an unthreaded process.
+ *
  * Called from:
  *  thread_single(exit) (called from execve and exit)
  *  kse_exit() XXX may need cleaning up wrt KSE stuff
@@ -718,22 +731,30 @@
 thread_unthread(struct thread *td)
 {
 	struct proc *p = td->td_proc;
+	struct thread *spare;

 	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
+	if (td->td_standin != NULL) {
+		spare = td->td_standin;
+		td->td_standin = NULL;
+		if (spare->td_state == TDS_GHOST) {
+			thread_link(spare, td->td_ksegrp);
+			mi_switch(0, spare);
+		} else {
+			thread_stash(spare);
+		}
+	}
 	upcall_remove(td);
 	p->p_flag &= ~(P_SA|P_HADTHREADS);
 	td->td_mailbox = NULL;
 	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
-	if (td->td_standin != NULL) {
-		thread_stash(td->td_standin);
-		td->td_standin = NULL;
-	}
 	sched_set_concurrency(td->td_ksegrp, 1);
 }

 /*
  * Called from:
  *  thread_exit()
+ *  thread_hold()
  */
 void
 thread_unlink(struct thread *td)
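The kern_thread.c hunks above take care to drain the ghost cache before its
ksegrp or process can be torn down. A companion to the earlier sketch, again
with illustrative names; note the kernel does not free a cached thread
directly, it makes it runnable so it can exit itself:

	/*
	 * Drain the cache before tearing down its container, mirroring
	 * the thread_unhold_all() calls added to thread_exit().
	 */
	static void
	obj_unhold_all(void)
	{
		struct obj *o;

		while ((o = TAILQ_FIRST(&objcache)) != NULL) {
			TAILQ_REMOVE(&objcache, o, o_link);
			numcached--;
			free(o);	/* kernel: setrunnable() + thread_link() */
		}
	}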
==== //depot/projects/nsched/sys/sys/proc.h#41 (text+ko) ====

@@ -304,6 +304,7 @@
 	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
 	enum {
 		TDS_INACTIVE = 0x0,
+		TDS_GHOST,		/* not allowed to die yet.. cached */
 		TDS_INHIBITED,
 		TDS_CAN_RUN,
 		TDS_RUNQ,
@@ -327,7 +328,7 @@
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
-#define	TDF_UNUSED0	0x00000001 /* --available -- */
+#define	TDF_GHOST	0x00000001 /* All set up but not alive. */
 #define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
 #define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
 #define	TDF_TIMEOUT	0x00000010 /* Timing out during sleep. */
@@ -466,6 +467,7 @@
 	TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
 	TAILQ_HEAD(, thread) kg_runq;	/* (td_runq) waiting RUNNABLE threads */
 	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */
+	TAILQ_HEAD(, thread) kg_ghostq;	/* (td_runq) cached instead of dead. */
 	TAILQ_HEAD(, kse_upcall) kg_upcalls;	/* All upcalls in the group. */

 #define	kg_startzero kg_estcpu
@@ -484,6 +486,7 @@
 	u_char		kg_user_pri;	/* (j) User pri from estcpu and nice. */
 #define	kg_endcopy kg_numthreads
 	int		kg_numthreads;	/* (j) Num threads in total. */
+	int		kg_numghosts;	/* (j) Num threads ghosting. */
 	struct kg_sched	*kg_sched;	/* (*) Scheduler-specific data. */
 };

@@ -932,6 +935,8 @@
 void	upcall_remove(struct thread *td);
 void	upcall_stash(struct kse_upcall *ke);
+void	thread_unhold_all(struct ksegrp *kg);
+void	thread_hold(struct thread *td);

 #endif	/* _KERNEL */

 #endif	/* !_SYS_PROC_H_ */
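On the consumer side, thread_schedule_upcall() tests for TDS_GHOST so a
recycled standin skips re-initialization. In the same illustrative sketch
terms as above (obj_get_ready and o_ready are assumptions, not kernel names):

	/*
	 * Consumer-side check, in the spirit of the TDS_GHOST test:
	 * a recycled object skips the expensive one-time setup.
	 */
	static struct obj *
	obj_get_ready(void)
	{
		struct obj *o;

		if ((o = obj_alloc()) == NULL)
			return (NULL);
		if (o->o_ready) {
			/* Came from the cache: already fully constructed. */
			return (o);
		}
		/* Fresh allocation: do the expensive setup once. */
		o->o_ready = 1;
		return (o);
	}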