Date: Sun, 2 May 2004 14:56:46 -0700 (PDT)
From: Julian Elischer <julian@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 52101 for review
Message-ID: <200405022156.i42Lukij086945@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=52101

Change 52101 by julian@julian_desk on 2004/05/02 14:56:01

	Split kern_thread.c into two files:
	kern_thread.c contains only code that is not specific to any
	particular threading implementation.
	kern_kse.c contains code specific to the KSE threading
	implementation.
	The split is not quite perfect and it almost certainly doesn't
	compile yet.

Affected files ...

.. //depot/projects/nsched/sys/conf/files#6 edit
.. //depot/projects/nsched/sys/kern/kern_kse.c#1 add
.. //depot/projects/nsched/sys/kern/kern_thread.c#8 edit

Differences ...

==== //depot/projects/nsched/sys/conf/files#6 (text+ko) ====

@@ -1044,7 +1044,7 @@
 kern/kern_idle.c	standard
 kern/kern_intr.c	standard
 kern/kern_jail.c	standard
-kern/kern_thr.c	standard
+kern/kern_kse.c	standard
 kern/kern_kthread.c	standard
 kern/kern_ktr.c	optional ktr
 kern/kern_ktrace.c	standard
@@ -1071,6 +1071,7 @@
 kern/kern_syscalls.c	standard
 kern/kern_sysctl.c	standard
 kern/kern_tc.c	standard
+kern/kern_thr.c	standard
 kern/kern_thread.c	standard
 kern/kern_time.c	standard
 kern/kern_timeout.c	standard

==== //depot/projects/nsched/sys/kern/kern_thread.c#8 (text+ko) ====

@@ -91,8 +91,6 @@
 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
-TAILQ_HEAD(, kse_upcall) zombie_upcalls =
-	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
 struct mtx kse_zombie_lock;
 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
@@ -332,56 +330,6 @@
 	ksegrp_stash(kg);
 }
 
-struct kse_upcall *
-upcall_alloc(void)
-{
-	struct kse_upcall *ku;
-
-	ku = uma_zalloc(upcall_zone, M_WAITOK);
-	bzero(ku, sizeof(*ku));
-	return (ku);
-}
-
-void
-upcall_free(struct kse_upcall *ku)
-{
-
-	uma_zfree(upcall_zone, ku);
-}
-
-void
-upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
-{
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
-	ku->ku_ksegrp = kg;
-	kg->kg_numupcalls++;
-}
-
-void
-upcall_unlink(struct kse_upcall *ku)
-{
-	struct ksegrp *kg = ku->ku_ksegrp;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
-	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
-	kg->kg_numupcalls--;
-	upcall_stash(ku);
-}
-
-void
-upcall_remove(struct thread *td)
-{
-
-	if (td->td_upcall) {
-		td->td_upcall->ku_owner = NULL;
-		upcall_unlink(td->td_upcall);
-		td->td_upcall = 0;
-	}
-}
-
 /*
  * For a newly created process,
  * link up all the structures and its initial threads etc.
@@ -411,441 +359,6 @@
 };
 #endif
 
-int
-kse_switchin(struct thread *td, struct kse_switchin_args *uap)
-{
-	mcontext_t mc;
-	int error;
-
-	error = (uap->mcp == NULL) ? EINVAL : 0;
-	if (!error)
-		error = copyin(uap->mcp, &mc, sizeof(mc));
-	if (!error && uap->loc != NULL)
-		error = (suword(uap->loc, uap->val) != 0) ? EINVAL : 0;
-	if (!error)
-		error = set_mcontext(td, &mc);
-	return ((error == 0) ? EJUSTRETURN : error);
-}
-
-/*
-struct kse_thr_interrupt_args {
-	struct kse_thr_mailbox * tmbx;
-	int cmd;
-	long data;
-};
-*/
-int
-kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
-{
-	struct proc *p;
-	struct thread *td2;
-
-	p = td->td_proc;
-
-	if (!(p->p_flag & P_SA))
-		return (EINVAL);
-
-	switch (uap->cmd) {
-	case KSE_INTR_SENDSIG:
-		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
-			return (EINVAL);
-	case KSE_INTR_INTERRUPT:
-	case KSE_INTR_RESTART:
-		PROC_LOCK(p);
-		mtx_lock_spin(&sched_lock);
-		FOREACH_THREAD_IN_PROC(p, td2) {
-			if (td2->td_mailbox == uap->tmbx)
-				break;
-		}
-		if (td2 == NULL) {
-			mtx_unlock_spin(&sched_lock);
-			PROC_UNLOCK(p);
-			return (ESRCH);
-		}
-		if (uap->cmd == KSE_INTR_SENDSIG) {
-			if (uap->data > 0) {
-				td2->td_flags &= ~TDF_INTERRUPT;
-				mtx_unlock_spin(&sched_lock);
-				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
-			} else {
-				mtx_unlock_spin(&sched_lock);
-			}
-		} else {
-			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
-			if (TD_CAN_UNBIND(td2))
-				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
-			if (uap->cmd == KSE_INTR_INTERRUPT)
-				td2->td_intrval = EINTR;
-			else
-				td2->td_intrval = ERESTART;
-			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
-				sleepq_abort(td2);
-			mtx_unlock_spin(&sched_lock);
-		}
-		PROC_UNLOCK(p);
-		break;
-	case KSE_INTR_SIGEXIT:
-		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
-			return (EINVAL);
-		PROC_LOCK(p);
-		sigexit(td, (int)uap->data);
-		break;
-	default:
-		return (EINVAL);
-	}
-	return (0);
-}
-
-/*
-struct kse_exit_args {
-	register_t dummy;
-};
-*/
-int
-kse_exit(struct thread *td, struct kse_exit_args *uap)
-{
-	struct proc *p;
-	struct ksegrp *kg;
-	struct kse_upcall *ku, *ku2;
-	int error, count;
-
-	p = td->td_proc;
-	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
-		return (EINVAL);
-	kg = td->td_ksegrp;
-	count = 0;
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
-		if (ku2->ku_flags & KUF_EXITING)
-			count++;
-	}
-	if ((kg->kg_numupcalls - count) == 1 &&
-	    (kg->kg_numthreads > 1)) {
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(p);
-		return (EDEADLK);
-	}
-	ku->ku_flags |= KUF_EXITING;
-	mtx_unlock_spin(&sched_lock);
-	PROC_UNLOCK(p);
-	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
-	PROC_LOCK(p);
-	if (error)
-		psignal(p, SIGSEGV);
-	mtx_lock_spin(&sched_lock);
-	upcall_remove(td);
-	if (p->p_numthreads == 1) {
-		kse_purge(p, td);
-		p->p_flag &= ~P_SA;
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(p);
-	} else {
-		if (kg->kg_numthreads == 1) { /* Shutdown a group */
-			kse_purge_group(td);
-			sched_exit_ksegrp(p->p_pptr, td);
-		}
-		thread_stopped(p);
-		thread_exit();
-		/* NOTREACHED */
-	}
-	return (0);
-}
-
-/*
- * Either becomes an upcall or waits for an awakening event and
- * then becomes an upcall. Only error cases return.
- */
-/*
-struct kse_release_args {
-	struct timespec *timeout;
-};
-*/
-int
-kse_release(struct thread *td, struct kse_release_args *uap)
-{
-	struct proc *p;
-	struct ksegrp *kg;
-	struct kse_upcall *ku;
-	struct timespec timeout;
-	struct timeval tv;
-	sigset_t sigset;
-	int error;
-
-	p = td->td_proc;
-	kg = td->td_ksegrp;
-	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
-		return (EINVAL);
-	if (uap->timeout != NULL) {
-		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
-			return (error);
-		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
-	}
-	if (td->td_flags & TDF_SA)
-		td->td_pflags |= TDP_UPCALLING;
-	else {
-		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
-		if (ku->ku_mflags == -1) {
-			PROC_LOCK(p);
-			sigexit(td, SIGSEGV);
-		}
-	}
-	PROC_LOCK(p);
-	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
-		/* UTS wants to wait for signal event */
-		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
-			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
-			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
-		p->p_flag &= ~P_SIGEVENT;
-		sigset = p->p_siglist;
-		PROC_UNLOCK(p);
-		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
-		    sizeof(sigset));
-	} else {
-		if (! kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
-			kg->kg_upsleeps++;
-			error = msleep(&kg->kg_completed, &p->p_mtx,
-			    PPAUSE|PCATCH, "kserel",
-			    (uap->timeout ? tvtohz(&tv) : 0));
-			kg->kg_upsleeps--;
-		}
-		PROC_UNLOCK(p);
-	}
-	if (ku->ku_flags & KUF_DOUPCALL) {
-		mtx_lock_spin(&sched_lock);
-		ku->ku_flags &= ~KUF_DOUPCALL;
-		mtx_unlock_spin(&sched_lock);
-	}
-	return (0);
-}
-
-/* struct kse_wakeup_args {
-	struct kse_mailbox *mbx;
-}; */
-int
-kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
-{
-	struct proc *p;
-	struct ksegrp *kg;
-	struct kse_upcall *ku;
-	struct thread *td2;
-
-	p = td->td_proc;
-	td2 = NULL;
-	ku = NULL;
-	/* KSE-enabled processes only, please. */
-	if (!(p->p_flag & P_SA))
-		return (EINVAL);
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	if (uap->mbx) {
-		FOREACH_KSEGRP_IN_PROC(p, kg) {
-			FOREACH_UPCALL_IN_GROUP(kg, ku) {
-				if (ku->ku_mailbox == uap->mbx)
-					break;
-			}
-			if (ku)
-				break;
-		}
-	} else {
-		kg = td->td_ksegrp;
-		if (kg->kg_upsleeps) {
-			wakeup_one(&kg->kg_completed);
-			mtx_unlock_spin(&sched_lock);
-			PROC_UNLOCK(p);
-			return (0);
-		}
-		ku = TAILQ_FIRST(&kg->kg_upcalls);
-	}
-	if (ku) {
-		if ((td2 = ku->ku_owner) == NULL) {
-			panic("%s: no owner", __func__);
-		} else if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) &&
-		    ((td2->td_wchan == &kg->kg_completed) ||
-		     (td2->td_wchan == &p->p_siglist &&
-		      (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
-			sleepq_abort(td2);
-		} else {
-			ku->ku_flags |= KUF_DOUPCALL;
-		}
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(p);
-		return (0);
-	}
-	mtx_unlock_spin(&sched_lock);
-	PROC_UNLOCK(p);
-	return (ESRCH);
-}
-
-/*
- * No new KSEG: first call: use current KSE, don't schedule an upcall
- * All other situations, do allocate max new KSEs and schedule an upcall.
- */
-/* struct kse_create_args {
-	struct kse_mailbox *mbx;
-	int newgroup;
-}; */
-int
-kse_create(struct thread *td, struct kse_create_args *uap)
-{
-	struct ksegrp *newkg;
-	struct ksegrp *kg;
-	struct proc *p;
-	struct kse_mailbox mbx;
-	struct kse_upcall *newku;
-	int err, ncpus, sa = 0, first = 0;
-	struct thread *newtd;
-
-	p = td->td_proc;
-	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
-		return (err);
-
-	ncpus = mp_ncpus;
-	if (virtual_cpu != 0)
-		ncpus = virtual_cpu;
-	if (!(mbx.km_flags & KMF_BOUND))
-		sa = TDF_SA;
-	else
-		ncpus = 1;
-	PROC_LOCK(p);
-	if (!(p->p_flag & P_SA)) {
-		first = 1;
-		p->p_flag |= P_SA;
-	}
-	PROC_UNLOCK(p);
-	if (!sa && !uap->newgroup && !first)
-		return (EINVAL);
-	kg = td->td_ksegrp;
-	if (uap->newgroup) {
-		/* Have race condition but it is cheap */
-		if (p->p_numksegrps >= max_groups_per_proc)
-			return (EPROCLIM);
-		/*
-		 * If we want a new KSEGRP it doesn't matter whether
-		 * we have already fired up KSE mode before or not.
-		 * We put the process in KSE mode and create a new KSEGRP.
-		 */
-		newkg = ksegrp_alloc();
-		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
-		      kg_startzero, kg_endzero));
-		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
-		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
-		PROC_LOCK(p);
-		mtx_lock_spin(&sched_lock);
-		if (p->p_numksegrps >= max_groups_per_proc) {
-			mtx_unlock_spin(&sched_lock);
-			PROC_UNLOCK(p);
-			ksegrp_free(newkg);
-			return (EPROCLIM);
-		}
-		ksegrp_link(newkg, p);
-		sched_fork_ksegrp(td, newkg);
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(p);
-	} else {
-		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
-			return (EINVAL);
-		newkg = kg;
-	}
-
-	/*
-	 * Creating upcalls more than number of physical cpu does
-	 * not help performance.
-	 */
-	if (newkg->kg_numupcalls >= ncpus)
-		return (EPROCLIM);
-
-	if (newkg->kg_numupcalls == 0) {
-		/*
-		 * Initialize KSE group
-		 *
-		 * For multiplxed group, set concurrancy equal to physical
-		 * cpus. This increases concurrent even if userland
-		 * is not MP safe and can only run on single CPU.
-		 * In ideal world, every physical cpu should execute a thread.
-		 * If there is enough KSEs, threads in kernel can be
-		 * executed parallel on different cpus with full speed,
-		 * Concurrent in kernel shouldn't be restricted by number of
-		 * upcalls userland provides. Adding more upcall structures
-		 * only increases concurrent in userland.
-		 *
-		 * For bound thread group, because there is only thread in the
-		 * group, we only create one KSE for the group. Thread in this
-		 * kind of group will never schedule an upcall when blocked,
-		 * this intends to simulate pthread system scope thread.
-		 */
-		sched_set_concurrancy(newkg, ncpus);
-	}
-	newku = upcall_alloc();
-	newku->ku_mailbox = uap->mbx;
-	newku->ku_func = mbx.km_func;
-	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
-
-	/* For the first call this may not have been set */
-	if (td->td_standin == NULL)
-		thread_alloc_spare(td, NULL);
-
-	PROC_LOCK(p);
-	if (newkg->kg_numupcalls >= ncpus) {
-		PROC_UNLOCK(p);
-		upcall_free(newku);
-		return (EPROCLIM);
-	}
-	if (first && sa) {
-		SIGSETOR(p->p_siglist, td->td_siglist);
-		SIGEMPTYSET(td->td_siglist);
-		SIGFILLSET(td->td_sigmask);
-		SIG_CANTMASK(td->td_sigmask);
-	}
-	mtx_lock_spin(&sched_lock);
-	PROC_UNLOCK(p);
-	upcall_link(newku, newkg);
-	if (mbx.km_quantum)
-		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
-
-	/*
-	 * Each upcall structure has an owner thread, find which
-	 * one owns it.
-	 */
-	if (uap->newgroup) {
-		/*
-		 * Because new ksegrp hasn't thread,
-		 * create an initial upcall thread to own it.
-		 */
-		newtd = thread_schedule_upcall(td, newku);
-	} else {
-		/*
-		 * If current thread hasn't an upcall structure,
-		 * just assign the upcall to it.
-		 */
-		if (td->td_upcall == NULL) {
-			newku->ku_owner = td;
-			td->td_upcall = newku;
-			newtd = td;
-		} else {
-			/*
-			 * Create a new upcall thread to own it.
-			 */
-			newtd = thread_schedule_upcall(td, newku);
-		}
-	}
-	if (!sa) {
-		newtd->td_mailbox = mbx.km_curthread;
-		newtd->td_flags &= ~TDF_SA;
-		if (newtd != td) {
-			mtx_unlock_spin(&sched_lock);
-			cpu_set_upcall_kse(newtd, newku);
-			mtx_lock_spin(&sched_lock);
-		}
-	} else {
-		newtd->td_flags |= TDF_SA;
-	}
-	if (newtd != td)
-		setrunqueue(newtd);
-	mtx_unlock_spin(&sched_lock);
-	return (0);
-}
-
 /*
  * Initialize global thread allocation resources.
  */
@@ -861,8 +374,6 @@
 	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
 	    ksegrp_ctor, ksegrp_dtor, ksegrp_init, NULL,
 	    UMA_ALIGN_CACHE, 0);
-	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
 }
 
 /*
@@ -876,19 +387,6 @@
 	mtx_unlock_spin(&kse_zombie_lock);
 }
 
-
-/*
- * Stash an embarasingly extra upcall into the zombie upcall queue.
- */
-
-void
-upcall_stash(struct kse_upcall *ku)
-{
-	mtx_lock_spin(&kse_zombie_lock);
-	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
-	mtx_unlock_spin(&kse_zombie_lock);
-}
-
 /*
  * Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
  */
@@ -908,25 +406,20 @@
 {
 	struct thread *td_first, *td_next;
 	struct ksegrp *kg_first, * kg_next;
-	struct kse_upcall *ku_first, *ku_next;
 
 	/*
 	 * Don't even bother to lock if none at this instant,
 	 * we really don't care about the next instant..
 	 */
 	if ((!TAILQ_EMPTY(&zombie_threads))
-	    || (!TAILQ_EMPTY(&zombie_ksegrps))
-	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
+	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
 		mtx_lock_spin(&kse_zombie_lock);
 		td_first = TAILQ_FIRST(&zombie_threads);
 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
-		ku_first = TAILQ_FIRST(&zombie_upcalls);
 		if (td_first)
 			TAILQ_INIT(&zombie_threads);
 		if (kg_first)
 			TAILQ_INIT(&zombie_ksegrps);
-		if (ku_first)
-			TAILQ_INIT(&zombie_upcalls);
 		mtx_unlock_spin(&kse_zombie_lock);
 		while (td_first) {
 			td_next = TAILQ_NEXT(td_first, td_runq);
@@ -940,13 +433,9 @@
 			ksegrp_free(kg_first);
 			kg_first = kg_next;
 		}
-		while (ku_first) {
-			ku_next = TAILQ_NEXT(ku_first, ku_link);
-			upcall_free(ku_first);
-			ku_first = ku_next;
-		}
+		sched_GC();
+		kse_GC();
 	}
-	sched_GC();
 }
 
 /*
@@ -1036,202 +525,6 @@
 }
 
 /*
- * Store the thread context in the UTS's mailbox.
- * then add the mailbox at the head of a list we are building in user space.
- * The list is anchored in the ksegrp structure.
- */
-int
-thread_export_context(struct thread *td, int willexit)
-{
-	struct proc *p;
-	struct ksegrp *kg;
-	uintptr_t mbx;
-	void *addr;
-	int error = 0, temp, sig;
-	mcontext_t mc;
-
-	p = td->td_proc;
-	kg = td->td_ksegrp;
-
-	/* Export the user/machine context. */
-	get_mcontext(td, &mc, 0);
-	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
-	error = copyout(&mc, addr, sizeof(mcontext_t));
-	if (error)
-		goto bad;
-
-	/* Exports clock ticks in kernel mode */
-	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
-	temp = fuword32(addr) + td->td_usticks;
-	if (suword32(addr, temp)) {
-		error = EFAULT;
-		goto bad;
-	}
-
-	/*
-	 * Post sync signal, or process SIGKILL and SIGSTOP.
-	 * For sync signal, it is only possible when the signal is not
-	 * caught by userland or process is being debugged.
-	 */
-	PROC_LOCK(p);
-	if (td->td_flags & TDF_NEEDSIGCHK) {
-		mtx_lock_spin(&sched_lock);
-		td->td_flags &= ~TDF_NEEDSIGCHK;
-		mtx_unlock_spin(&sched_lock);
-		mtx_lock(&p->p_sigacts->ps_mtx);
-		while ((sig = cursig(td)) != 0)
-			postsig(sig);
-		mtx_unlock(&p->p_sigacts->ps_mtx);
-	}
-	if (willexit)
-		SIGFILLSET(td->td_sigmask);
-	PROC_UNLOCK(p);
-
-	/* Get address in latest mbox of list pointer */
-	addr = (void *)(&td->td_mailbox->tm_next);
-	/*
-	 * Put the saved address of the previous first
-	 * entry into this one
-	 */
-	for (;;) {
-		mbx = (uintptr_t)kg->kg_completed;
-		if (suword(addr, mbx)) {
-			error = EFAULT;
-			goto bad;
-		}
-		PROC_LOCK(p);
-		if (mbx == (uintptr_t)kg->kg_completed) {
-			kg->kg_completed = td->td_mailbox;
-			/*
-			 * The thread context may be taken away by
-			 * other upcall threads when we unlock
-			 * process lock. it's no longer valid to
-			 * use it again in any other places.
-			 */
-			td->td_mailbox = NULL;
-			PROC_UNLOCK(p);
-			break;
-		}
-		PROC_UNLOCK(p);
-	}
-	td->td_usticks = 0;
-	return (0);
-
-bad:
-	PROC_LOCK(p);
-	sigexit(td, SIGILL);
-	return (error);
-}
-
-/*
- * Take the list of completed mailboxes for this KSEGRP and put them on this
- * upcall's mailbox as it's the next one going up.
- */
-static int
-thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
-{
-	struct proc *p = kg->kg_proc;
-	void *addr;
-	uintptr_t mbx;
-
-	addr = (void *)(&ku->ku_mailbox->km_completed);
-	for (;;) {
-		mbx = (uintptr_t)kg->kg_completed;
-		if (suword(addr, mbx)) {
-			PROC_LOCK(p);
-			psignal(p, SIGSEGV);
-			PROC_UNLOCK(p);
-			return (EFAULT);
-		}
-		PROC_LOCK(p);
-		if (mbx == (uintptr_t)kg->kg_completed) {
-			kg->kg_completed = NULL;
-			PROC_UNLOCK(p);
-			break;
-		}
-		PROC_UNLOCK(p);
-	}
-	return (0);
-}
-
-/*
- * This function should be called at statclock interrupt time
- */
-int
-thread_statclock(int user)
-{
-	struct thread *td = curthread;
-	struct ksegrp *kg = td->td_ksegrp;
-
-	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
-		return (0);
-	if (user) {
-		/* Current always do via ast() */
-		mtx_lock_spin(&sched_lock);
-		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
-		mtx_unlock_spin(&sched_lock);
-		td->td_uuticks++;
-	} else {
-		if (td->td_mailbox != NULL)
-			td->td_usticks++;
-		else {
-			/* XXXKSE
-			 * We will call thread_user_enter() for every
-			 * kernel entry in future, so if the thread mailbox
-			 * is NULL, it must be a UTS kernel, don't account
-			 * clock ticks for it.
-			 */
-		}
-	}
-	return (0);
-}
-
-/*
- * Export state clock ticks for userland
- */
-static int
-thread_update_usr_ticks(struct thread *td, int user)
-{
-	struct proc *p = td->td_proc;
-	struct kse_thr_mailbox *tmbx;
-	struct kse_upcall *ku;
-	struct ksegrp *kg;
-	caddr_t addr;
-	u_int uticks;
-
-	if ((ku = td->td_upcall) == NULL)
-		return (-1);
-
-	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
-	if ((tmbx == NULL) || (tmbx == (void *)-1))
-		return (-1);
-	if (user) {
-		uticks = td->td_uuticks;
-		td->td_uuticks = 0;
-		addr = (caddr_t)&tmbx->tm_uticks;
-	} else {
-		uticks = td->td_usticks;
-		td->td_usticks = 0;
-		addr = (caddr_t)&tmbx->tm_sticks;
-	}
-	if (uticks) {
-		if (suword32(addr, uticks+fuword32(addr))) {
-			PROC_LOCK(p);
-			psignal(p, SIGSEGV);
-			PROC_UNLOCK(p);
-			return (-2);
-		}
-	}
-	kg = td->td_ksegrp;
-	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
-		mtx_lock_spin(&sched_lock);
-		td->td_upcall->ku_flags |= KUF_DOUPCALL;
-		mtx_unlock_spin(&sched_lock);
-	}
-	return (0);
-}
-
-/*
  * Discard the current thread and exit from its context.
  *
  * Because we can't free a thread while we're operating under its context,
@@ -1428,404 +721,6 @@
 }
 
 /*
- * This function is intended to be used to initialize a spare thread
- * for upcall. Initialize thread's large data area outside sched_lock
- * for thread_schedule_upcall().
- */
-void
-thread_alloc_spare(struct thread *td, struct thread *spare)
-{
-
-	if (td->td_standin)
-		return;
-	if (spare == NULL) {
-		spare = thread_alloc();
-		spare->td_tid = thread_new_tid();
-	} else {
-		sched_init_thread(spare);
-	}
-	td->td_standin = spare;
-	bzero(&spare->td_startzero,
-	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
-	spare->td_proc = td->td_proc;
-	spare->td_ucred = crhold(td->td_ucred);
-}
-
-/*
- * Create a thread and schedule it for upcall on the KSE given.
- * Use our thread's standin so that we don't have to allocate one.
- */
-struct thread *
-thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
-{
-	struct thread *td2;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-
-	/*
-	 * Schedule an upcall thread on specified kse_upcall,
-	 * the kse_upcall must be free.
-	 * td must have a spare thread.
-	 */
-	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
-	if ((td2 = td->td_standin) != NULL) {
-		td->td_standin = NULL;
-	} else {
-		panic("no reserve thread when scheduling an upcall");
-		return (NULL);
-	}
-	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
-	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
-	bcopy(&td->td_startcopy, &td2->td_startcopy,
-	      (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
-	thread_link(td2, ku->ku_ksegrp);
-	/* inherit parts of blocked thread's context as a good template */
-	cpu_set_upcall(td2, td);
-	/* Let the new thread become owner of the upcall */
-	ku->ku_owner = td2;
-	td2->td_upcall = ku;
-	td2->td_flags = TDF_SA;
-	td2->td_pflags = TDP_UPCALLING;
-	td2->td_state = TDS_CAN_RUN;
-	td2->td_inhibitors = 0;
-	SIGFILLSET(td2->td_sigmask);
-	SIG_CANTMASK(td2->td_sigmask);
-	sched_fork_thread(td, td2);
-	return (td2);	/* bogus.. should be a void function */
-}
-
-/*
- * It is only used when thread generated a trap and process is being
- * debugged.
- */
-void
-thread_signal_add(struct thread *td, int sig)
-{
-	struct proc *p;
-	siginfo_t siginfo;
-	struct sigacts *ps;
-	int error;
-
-	p = td->td_proc;
-	PROC_LOCK_ASSERT(p, MA_OWNED);
-	ps = p->p_sigacts;
-	mtx_assert(&ps->ps_mtx, MA_OWNED);
-
-	cpu_thread_siginfo(sig, 0, &siginfo);
-	mtx_unlock(&ps->ps_mtx);
-	PROC_UNLOCK(p);
-	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
-	if (error) {
-		PROC_LOCK(p);
-		sigexit(td, SIGILL);
-	}
-	PROC_LOCK(p);
-	SIGADDSET(td->td_sigmask, sig);
-	mtx_lock(&ps->ps_mtx);
-}
-
-void
-thread_switchout(struct thread *td)
-{
-	struct kse_upcall *ku;
-	struct thread *td2;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-
-	/*
-	 * If the outgoing thread is in threaded group and has never
-	 * scheduled an upcall, decide whether this is a short
-	 * or long term event and thus whether or not to schedule
-	 * an upcall.
-	 * If it is a short term event, just suspend it in
-	 * a way that takes its KSE with it.
-	 * Select the events for which we want to schedule upcalls.
-	 * For now it's just sleep.
-	 * XXXKSE eventually almost any inhibition could do.
-	 */
-	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
-		/*
-		 * Release ownership of upcall, and schedule an upcall
-		 * thread, this new upcall thread becomes the owner of
-		 * the upcall structure.
-		 */
-		ku = td->td_upcall;
-		ku->ku_owner = NULL;
-		td->td_upcall = NULL;
-		td->td_flags &= ~TDF_CAN_UNBIND;
-		td2 = thread_schedule_upcall(td, ku);
-		setrunqueue(td2);
-	}
-}
-
-/*
- * Setup done on the thread when it enters the kernel.
- * XXXKSE Presently only for syscalls but eventually all kernel entries.
- */
-void
-thread_user_enter(struct proc *p, struct thread *td)
-{
-	struct ksegrp *kg;
-	struct kse_upcall *ku;
-	struct kse_thr_mailbox *tmbx;
-	uint32_t tflags;
-
-	kg = td->td_ksegrp;
-
-	/*
-	 * First check that we shouldn't just abort.
-	 * But check if we are the single thread first!
-	 */
-	if (p->p_flag & P_SINGLE_EXIT) {
-		PROC_LOCK(p);
-		mtx_lock_spin(&sched_lock);
-		thread_stopped(p);
-		thread_exit();
-		/* NOTREACHED */
-	}
-
-	/*
-	 * If we are doing a syscall in a KSE environment,
-	 * note where our mailbox is. There is always the
-	 * possibility that we could do this lazily (in kse_reassign()),
-	 * but for now do it every time.
-	 */
-	kg = td->td_ksegrp;
-	if (td->td_flags & TDF_SA) {
-		ku = td->td_upcall;
-		KASSERT(ku, ("%s: no upcall owned", __func__));
-		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
-		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
-		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
-		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);

>>> TRUNCATED FOR MAIL (1000 lines) <<<
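The upcall_stash()/thread_reap() pair that this change moves into kern_kse.c is an instance of the kernel's zombie-queue idiom: a structure that cannot be freed at the moment it dies (the exiting context may still be running on it, and the caller holds a spin lock, so it must not sleep in the allocator) is instead parked on a global TAILQ under kse_zombie_lock, and a later thread reaps the whole batch at a safe time. Below is a minimal userspace sketch of that idiom, for illustration only: it uses <sys/queue.h> with a pthread mutex standing in for the spin lock, and the names (struct upcall, upcall_stash_sketch, reap_sketch) are invented for the example rather than taken from the kernel.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Stand-in for struct kse_upcall; only the list linkage matters here. */
struct upcall {
	TAILQ_ENTRY(upcall) link;
	int id;
};

/* The zombie queue and its lock, mirroring zombie_upcalls/kse_zombie_lock. */
static TAILQ_HEAD(, upcall) zombies = TAILQ_HEAD_INITIALIZER(zombies);
static pthread_mutex_t zombie_lock = PTHREAD_MUTEX_INITIALIZER;

/* Park a dead structure on the zombie queue instead of freeing it now. */
static void
upcall_stash_sketch(struct upcall *u)
{
	pthread_mutex_lock(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombies, u, link);
	pthread_mutex_unlock(&zombie_lock);
}

/* At a safe time, steal the whole list under the lock, then free outside it. */
static void
reap_sketch(void)
{
	struct upcall *u, *next;

	/* Cheap unlocked emptiness check, as in thread_reap(). */
	if (TAILQ_EMPTY(&zombies))
		return;
	pthread_mutex_lock(&zombie_lock);
	u = TAILQ_FIRST(&zombies);
	TAILQ_INIT(&zombies);
	pthread_mutex_unlock(&zombie_lock);
	while (u != NULL) {
		next = TAILQ_NEXT(u, link);
		printf("reaping upcall %d\n", u->id);
		free(u);
		u = next;
	}
}

int
main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct upcall *u = malloc(sizeof(*u));

		if (u == NULL)
			abort();
		u->id = i;
		upcall_stash_sketch(u);
	}
	reap_sketch();
	return (0);
}

Note how reap_sketch() steals the entire list while holding the lock and frees the elements only after dropping it; keeping the critical section that short is what makes a spin mutex like kse_zombie_lock appropriate in the kernel version.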