Date: Mon, 5 Jun 2006 03:15:58 GMT
From: Peter Wemm <peter@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 98517 for review
Message-ID: <200606050315.k553FwaP003479@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=98517

Change 98517 by peter@peter_daintree on 2006/06/05 03:15:23

	Checkpoint my cvs tree.  This actually boots.  But, it breaks
	libpthread, hence the name of the branch.  I'm submitting this
	from my laptop running kde/firefox/etc.
	/etc/libmap.conf: "libpthread.so.1 libthr.so.1" helps.
	(UP, sched_4bsd, i386 tested.)

Affected files ...

.. //depot/projects/bike_sched/sys/amd64/amd64/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/amd64/amd64/trap.c#2 edit
.. //depot/projects/bike_sched/sys/amd64/amd64/vm_machdep.c#2 edit
.. //depot/projects/bike_sched/sys/arm/arm/trap.c#2 edit
.. //depot/projects/bike_sched/sys/arm/at91/kb920x_machdep.c#2 edit
.. //depot/projects/bike_sched/sys/arm/sa11x0/assabet_machdep.c#2 edit
.. //depot/projects/bike_sched/sys/arm/xscale/i80321/iq31244_machdep.c#2 edit
.. //depot/projects/bike_sched/sys/conf/files#2 edit
.. //depot/projects/bike_sched/sys/ddb/db_ps.c#2 edit
.. //depot/projects/bike_sched/sys/i386/i386/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/i386/i386/sys_machdep.c#2 edit
.. //depot/projects/bike_sched/sys/i386/i386/trap.c#2 edit
.. //depot/projects/bike_sched/sys/ia64/ia64/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/kern/init_main.c#2 edit
.. //depot/projects/bike_sched/sys/kern/init_sysent.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_clock.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_fork.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_idle.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_intr.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_kse.c#2 delete
.. //depot/projects/bike_sched/sys/kern/kern_poll.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_proc.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_resource.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_sig.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_subr.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_switch.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_synch.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_thr.c#2 edit
.. //depot/projects/bike_sched/sys/kern/kern_thread.c#2 edit
.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#2 edit
.. //depot/projects/bike_sched/sys/kern/sched_ule.c#2 edit
.. //depot/projects/bike_sched/sys/kern/subr_trap.c#2 edit
.. //depot/projects/bike_sched/sys/kern/sys_process.c#2 edit
.. //depot/projects/bike_sched/sys/kern/syscalls.c#2 edit
.. //depot/projects/bike_sched/sys/kern/syscalls.master#2 edit
.. //depot/projects/bike_sched/sys/kern/tty.c#2 edit
.. //depot/projects/bike_sched/sys/pc98/pc98/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/posix4/ksched.c#2 edit
.. //depot/projects/bike_sched/sys/powerpc/powerpc/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/sparc64/sparc64/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/sys/proc.h#2 edit
.. //depot/projects/bike_sched/sys/sys/rtprio.h#2 edit
.. //depot/projects/bike_sched/sys/sys/sched.h#2 edit
.. //depot/projects/bike_sched/sys/sys/syscall.h#2 edit
.. //depot/projects/bike_sched/sys/sys/syscall.mk#2 edit
.. //depot/projects/bike_sched/sys/sys/sysproto.h#2 edit
.. //depot/projects/bike_sched/sys/vm/vm_glue.c#2 edit
.. //depot/projects/bike_sched/sys/vm/vm_zeroidle.c#2 edit

Differences ...

==== //depot/projects/bike_sched/sys/amd64/amd64/machdep.c#2 (text+ko) ====

@@ -1137,7 +1137,7 @@
 	 * This may be done better later if it gets more high level
 	 * components in it. If so just link td->td_proc here.
*/ - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); preload_bootstrap_relocate(KERNBASE); ==== //depot/projects/bike_sched/sys/amd64/amd64/trap.c#2 (text+ko) ==== @@ -301,8 +301,6 @@ case T_PAGEFLT: /* page fault */ addr = frame.tf_addr; - if (td->td_pflags & TDP_SA) - thread_user_enter(td); i = trap_pfault(&frame, TRUE); if (i == -1) goto userout; @@ -759,8 +757,6 @@ td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); - if (p->p_flag & P_SA) - thread_user_enter(td); params = (caddr_t)frame.tf_rsp + sizeof(register_t); code = frame.tf_rax; orig_tf_rflags = frame.tf_rflags; ==== //depot/projects/bike_sched/sys/amd64/amd64/vm_machdep.c#2 (text+ko) ==== @@ -311,15 +311,6 @@ stack_t *stack) { - /* - * Do any extra cleaning that needs to be done. - * The thread may have optional components - * that are not present in a fresh thread. - * This may be a recycled thread so make it look - * as though it's newly allocated. - */ - cpu_thread_clean(td); - /* * Set the trap frame to point at the beginning of the uts * function. ==== //depot/projects/bike_sched/sys/arm/arm/trap.c#2 (text+ko) ==== @@ -264,8 +264,6 @@ td->td_frame = tf; if (td->td_ucred != td->td_proc->p_ucred) cred_update_thread(td); - if (td->td_pflags & TDP_SA) - thread_user_enter(td); } /* Grab the current pcb */ ==== //depot/projects/bike_sched/sys/arm/at91/kb920x_machdep.c#2 (text) ==== @@ -382,7 +382,7 @@ undefined_handler_address = (u_int)undefinedinstruction_bounce; undefined_init(); - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; ==== //depot/projects/bike_sched/sys/arm/sa11x0/assabet_machdep.c#2 (text+ko) ==== @@ -413,7 +413,7 @@ /* Set stack for exception handlers */ - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; ==== //depot/projects/bike_sched/sys/arm/xscale/i80321/iq31244_machdep.c#2 (text+ko) ==== @@ -428,7 +428,7 @@ undefined_handler_address = (u_int)undefinedinstruction_bounce; undefined_init(); - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; ==== //depot/projects/bike_sched/sys/conf/files#2 (text+ko) ==== @@ -1279,7 +1279,6 @@ kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard -kern/kern_kse.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard ==== //depot/projects/bike_sched/sys/ddb/db_ps.c#2 (text+ko) ==== @@ -295,7 +295,6 @@ db_printf("Thread %d at %p:\n", td->td_tid, td); db_printf(" proc (pid %d): %p ", td->td_proc->p_pid, td->td_proc); - db_printf(" ksegrp: %p\n", td->td_ksegrp); if (td->td_name[0] != '\0') db_printf(" name: %s\n", td->td_name); db_printf(" flags: %#x ", td->td_flags); ==== //depot/projects/bike_sched/sys/i386/i386/machdep.c#2 (text+ko) ==== @@ -2071,7 +2071,7 @@ * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. 
*/ - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); metadata_missing = 0; if (bootinfo.bi_modulep) { ==== //depot/projects/bike_sched/sys/i386/i386/sys_machdep.c#2 (text+ko) ==== @@ -233,9 +233,6 @@ 0 /* granularity */ }; - if (td->td_proc->p_flag & P_SA) - return (EINVAL); /* XXXKSE */ -/* XXXKSE All the code below only works in 1:1 needs changing */ ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1)); if (ext == 0) return (ENOMEM); ==== //depot/projects/bike_sched/sys/i386/i386/trap.c#2 (text+ko) ==== @@ -348,9 +348,6 @@ break; case T_PAGEFLT: /* page fault */ - if (td->td_pflags & TDP_SA) - thread_user_enter(td); - i = trap_pfault(&frame, TRUE, eva); #if defined(I586_CPU) && !defined(NO_F00F_HACK) if (i == -2) { @@ -938,8 +935,6 @@ td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); - if (p->p_flag & P_SA) - thread_user_enter(td); params = (caddr_t)frame.tf_esp + sizeof(int); code = frame.tf_eax; orig_tf_eflags = frame.tf_eflags; ==== //depot/projects/bike_sched/sys/ia64/ia64/machdep.c#2 (text+ko) ==== @@ -767,7 +767,7 @@ msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE); msgbufinit(msgbufp, MSGBUF_SIZE); - proc_linkup(&proc0, &ksegrp0, &thread0); + proc_linkup(&proc0, &thread0); /* * Init mapping for kernel stack for proc 0 */ ==== //depot/projects/bike_sched/sys/kern/init_main.c#2 (text+ko) ==== @@ -95,7 +95,6 @@ static struct pgrp pgrp0; struct proc proc0; struct thread thread0 __aligned(8); -struct ksegrp ksegrp0; struct vmspace vmspace0; struct proc *initproc; @@ -363,12 +362,10 @@ struct proc *p; unsigned i; struct thread *td; - struct ksegrp *kg; GIANT_REQUIRED; p = &proc0; td = &thread0; - kg = &ksegrp0; /* * Initialize magic number. @@ -376,14 +373,14 @@ p->p_magic = P_MAGIC; /* - * Initialize thread, process and ksegrp structures. + * Initialize thread and process structures. */ procinit(); /* set up proc zone */ - threadinit(); /* set up thead, upcall and KSEGRP zones */ + threadinit(); /* set up UMA zones */ /* * Initialise scheduler resources. - * Add scheduler specific parts to proc, ksegrp, thread as needed. + * Add scheduler specific parts to proc, thread as needed. */ schedinit(); /* scheduler gets its house in order */ /* @@ -420,8 +417,8 @@ STAILQ_INIT(&p->p_ktr); p->p_nice = NZERO; td->td_state = TDS_RUNNING; - kg->kg_pri_class = PRI_TIMESHARE; - kg->kg_user_pri = PUSER; + td->td_pri_class = PRI_TIMESHARE; + td->td_user_pri = PUSER; td->td_priority = PVM; td->td_base_pri = PUSER; td->td_oncpu = 0; ==== //depot/projects/bike_sched/sys/kern/init_sysent.c#2 (text+ko) ==== @@ -2,8 +2,8 @@ * System call switch table. * * DO NOT EDIT-- this file is automatically generated. 
- * $FreeBSD: src/sys/kern/init_sysent.c,v 1.211 2006/03/23 08:48:37 davidxu Exp $ - * created from FreeBSD: src/sys/kern/syscalls.master,v 1.213 2006/03/23 08:46:41 davidxu Exp + * $FreeBSD$ + * created from FreeBSD: src/sys/kern/syscalls.master,v 1.215 2006/03/28 14:32:37 des Exp */ #include "opt_compat.h" @@ -408,11 +408,11 @@ { SYF_MPSAFE | AS(eaccess_args), (sy_call_t *)eaccess, AUE_EACCESS }, /* 376 = eaccess */ { 0, (sy_call_t *)nosys, AUE_NULL }, /* 377 = afs_syscall */ { AS(nmount_args), (sy_call_t *)nmount, AUE_NMOUNT }, /* 378 = nmount */ - { SYF_MPSAFE | 0, (sy_call_t *)kse_exit, AUE_NULL }, /* 379 = kse_exit */ - { SYF_MPSAFE | AS(kse_wakeup_args), (sy_call_t *)kse_wakeup, AUE_NULL }, /* 380 = kse_wakeup */ - { SYF_MPSAFE | AS(kse_create_args), (sy_call_t *)kse_create, AUE_NULL }, /* 381 = kse_create */ - { SYF_MPSAFE | AS(kse_thr_interrupt_args), (sy_call_t *)kse_thr_interrupt, AUE_NULL }, /* 382 = kse_thr_interrupt */ - { SYF_MPSAFE | AS(kse_release_args), (sy_call_t *)kse_release, AUE_NULL }, /* 383 = kse_release */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 379 = kse_exit */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 380 = kse_wakeup */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 381 = kse_create */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 382 = kse_thr_interrupt */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 383 = kse_release */ { SYF_MPSAFE | AS(__mac_get_proc_args), (sy_call_t *)__mac_get_proc, AUE_NULL }, /* 384 = __mac_get_proc */ { SYF_MPSAFE | AS(__mac_set_proc_args), (sy_call_t *)__mac_set_proc, AUE_NULL }, /* 385 = __mac_set_proc */ { SYF_MPSAFE | AS(__mac_get_fd_args), (sy_call_t *)__mac_get_fd, AUE_NULL }, /* 386 = __mac_get_fd */ @@ -469,7 +469,7 @@ { SYF_MPSAFE | AS(extattr_list_fd_args), (sy_call_t *)extattr_list_fd, AUE_NULL }, /* 437 = extattr_list_fd */ { SYF_MPSAFE | AS(extattr_list_file_args), (sy_call_t *)extattr_list_file, AUE_NULL }, /* 438 = extattr_list_file */ { SYF_MPSAFE | AS(extattr_list_link_args), (sy_call_t *)extattr_list_link, AUE_NULL }, /* 439 = extattr_list_link */ - { SYF_MPSAFE | AS(kse_switchin_args), (sy_call_t *)kse_switchin, AUE_NULL }, /* 440 = kse_switchin */ + { 0, (sy_call_t *)nosys, AUE_NULL }, /* 440 = kse_switchin */ { SYF_MPSAFE | AS(ksem_timedwait_args), (sy_call_t *)lkmressys, AUE_NULL }, /* 441 = ksem_timedwait */ { SYF_MPSAFE | AS(thr_suspend_args), (sy_call_t *)thr_suspend, AUE_NULL }, /* 442 = thr_suspend */ { SYF_MPSAFE | AS(thr_wake_args), (sy_call_t *)thr_wake, AUE_NULL }, /* 443 = thr_wake */ ==== //depot/projects/bike_sched/sys/kern/kern_clock.c#2 (text+ko) ==== @@ -201,21 +201,17 @@ * Run current process's virtual and profile time, as needed. */ mtx_lock_spin_flags(&sched_lock, MTX_QUIET); - if (p->p_flag & P_SA) { - /* XXXKSE What to do? 
*/ - } else { - pstats = p->p_stats; - if (usermode && - timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && - itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) { - p->p_sflag |= PS_ALRMPEND; - td->td_flags |= TDF_ASTPENDING; - } - if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && - itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) { - p->p_sflag |= PS_PROFPEND; - td->td_flags |= TDF_ASTPENDING; - } + pstats = p->p_stats; + if (usermode && + timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && + itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) { + p->p_sflag |= PS_ALRMPEND; + td->td_flags |= TDF_ASTPENDING; + } + if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && + itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) { + p->p_sflag |= PS_PROFPEND; + td->td_flags |= TDF_ASTPENDING; } mtx_unlock_spin_flags(&sched_lock, MTX_QUIET); @@ -413,8 +409,6 @@ /* * Charge the time as appropriate. */ - if (p->p_flag & P_SA) - thread_statclock(1); td->td_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; @@ -438,8 +432,6 @@ td->td_iticks++; cp_time[CP_INTR]++; } else { - if (p->p_flag & P_SA) - thread_statclock(0); td->td_pticks++; td->td_sticks++; if (td != PCPU_GET(idlethread)) ==== //depot/projects/bike_sched/sys/kern/kern_fork.c#2 (text+ko) ==== @@ -205,7 +205,6 @@ struct filedesc *fd; struct filedesc_to_leader *fdtol; struct thread *td2; - struct ksegrp *kg2; struct sigacts *newsigacts; int error; @@ -472,7 +471,6 @@ * then copy the section that is copied directly from the parent. */ td2 = FIRST_THREAD_IN_PROC(p2); - kg2 = FIRST_KSEGRP_IN_PROC(p2); /* Allocate and switch to an alternate kstack if specified. */ if (pages != 0) @@ -485,15 +483,11 @@ __rangeof(struct proc, p_startzero, p_endzero)); bzero(&td2->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); - bzero(&kg2->kg_startzero, - __rangeof(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&p1->p_startcopy, &p2->p_startcopy, __rangeof(struct proc, p_startcopy, p_endcopy)); bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); - bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy, - __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); td2->td_sigstk = td->td_sigstk; td2->td_sigmask = td->td_sigmask; ==== //depot/projects/bike_sched/sys/kern/kern_idle.c#2 (text+ko) ==== @@ -79,7 +79,7 @@ td = FIRST_THREAD_IN_PROC(p); TD_SET_CAN_RUN(td); td->td_flags |= TDF_IDLETD; - sched_class(td->td_ksegrp, PRI_IDLE); + sched_class(td, PRI_IDLE); sched_prio(td, PRI_MAX_IDLE); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); ==== //depot/projects/bike_sched/sys/kern/kern_intr.c#2 (text+ko) ==== @@ -296,7 +296,7 @@ panic("kthread_create() failed with %d", error); td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ mtx_lock_spin(&sched_lock); - td->td_ksegrp->kg_pri_class = PRI_ITHD; + td->td_pri_class = PRI_ITHD; TD_SET_IWAIT(td); mtx_unlock_spin(&sched_lock); td->td_pflags |= TDP_ITHREAD; ==== //depot/projects/bike_sched/sys/kern/kern_poll.c#2 (text+ko) ==== @@ -581,7 +581,7 @@ rtp.prio = RTP_PRIO_MAX; /* lowest priority */ rtp.type = RTP_PRIO_IDLE; mtx_lock_spin(&sched_lock); - rtp_to_pri(&rtp, td->td_ksegrp); + rtp_to_pri(&rtp, td); mtx_unlock_spin(&sched_lock); for (;;) { ==== //depot/projects/bike_sched/sys/kern/kern_proc.c#2 (text+ko) ==== @@ -142,9 +142,6 @@ { struct proc *p; struct thread *td; -#ifdef INVARIANTS - struct ksegrp *kg; -#endif /* INVARIANTS checks go here */ p = (struct proc *)mem; @@ -152,10 +149,7 @@ #ifdef INVARIANTS 
KASSERT((p->p_numthreads == 1), ("bad number of threads in exiting process")); - KASSERT((p->p_numksegrps == 1), ("free proc with > 1 ksegrp")); KASSERT((td != NULL), ("proc_dtor: bad thread pointer")); - kg = FIRST_KSEGRP_IN_PROC(p); - KASSERT((kg != NULL), ("proc_dtor: bad kg pointer")); KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr")); #endif @@ -178,17 +172,14 @@ { struct proc *p; struct thread *td; - struct ksegrp *kg; p = (struct proc *)mem; p->p_sched = (struct p_sched *)&p[1]; td = thread_alloc(); - kg = ksegrp_alloc(); bzero(&p->p_mtx, sizeof(struct mtx)); mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); p->p_stats = pstats_alloc(); - proc_linkup(p, kg, td); - sched_newproc(p, kg, td); + proc_linkup(p, td); return (0); } @@ -204,7 +195,6 @@ p = (struct proc *)mem; pstats_free(p->p_stats); - ksegrp_free(FIRST_KSEGRP_IN_PROC(p)); thread_free(FIRST_THREAD_IN_PROC(p)); mtx_destroy(&p->p_mtx); if (p->p_ksi != NULL) @@ -760,7 +750,6 @@ static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp) { - struct ksegrp *kg; struct proc *p; p = td->td_proc; @@ -800,14 +789,6 @@ kp->ki_stat = SIDL; } - kg = td->td_ksegrp; - - /* things in the KSE GROUP */ - kp->ki_estcpu = kg->kg_estcpu; - kp->ki_slptime = kg->kg_slptime; - kp->ki_pri.pri_user = kg->kg_user_pri; - kp->ki_pri.pri_class = kg->kg_pri_class; - /* Things in the thread */ kp->ki_wchan = td->td_wchan; kp->ki_pri.pri_level = td->td_priority; @@ -820,6 +801,10 @@ kp->ki_pcb = td->td_pcb; kp->ki_kstack = (void *)td->td_kstack; kp->ki_pctcpu = sched_pctcpu(td); + kp->ki_estcpu = td->td_estcpu; + kp->ki_slptime = td->td_slptime; + kp->ki_pri.pri_class = td->td_pri_class; + kp->ki_pri.pri_user = td->td_user_pri; /* We can't get this anymore but ps etc never used it anyway. */ kp->ki_rqindex = 0; ==== //depot/projects/bike_sched/sys/kern/kern_resource.c#2 (text+ko) ==== @@ -292,7 +292,7 @@ { struct proc *curp; struct proc *p; - struct ksegrp *kg; + struct thread *tdp; struct rtprio rtp; int cierror, error; @@ -328,14 +328,14 @@ * as leaving it zero. */ if (uap->pid == 0) { - pri_to_rtp(td->td_ksegrp, &rtp); + pri_to_rtp(td, &rtp); } else { struct rtprio rtp2; rtp.type = RTP_PRIO_IDLE; rtp.prio = RTP_PRIO_MAX; - FOREACH_KSEGRP_IN_PROC(p, kg) { - pri_to_rtp(kg, &rtp2); + FOREACH_THREAD_IN_PROC(p, tdp) { + pri_to_rtp(tdp, &rtp2); if (rtp2.type < rtp.type || (rtp2.type == rtp.type && rtp2.prio < rtp.prio)) { @@ -378,18 +378,17 @@ /* * If we are setting our own priority, set just our - * KSEGRP but if we are doing another process, - * do all the groups on that process. If we + * thread but if we are doing another process, + * do all the threads on that process. If we * specify our own pid we do the latter. 
*/ mtx_lock_spin(&sched_lock); if (uap->pid == 0) { - error = rtp_to_pri(&rtp, td->td_ksegrp); + error = rtp_to_pri(&rtp, td); } else { - FOREACH_KSEGRP_IN_PROC(p, kg) { - if ((error = rtp_to_pri(&rtp, kg)) != 0) { + FOREACH_THREAD_IN_PROC(p, td) { + if ((error = rtp_to_pri(&rtp, td)) != 0) break; - } } } mtx_unlock_spin(&sched_lock); @@ -403,7 +402,7 @@ } int -rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg) +rtp_to_pri(struct rtprio *rtp, struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); @@ -411,43 +410,42 @@ return (EINVAL); switch (RTP_PRIO_BASE(rtp->type)) { case RTP_PRIO_REALTIME: - kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio; + td->td_user_pri = PRI_MIN_REALTIME + rtp->prio; break; case RTP_PRIO_NORMAL: - kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio; + td->td_user_pri = PRI_MIN_TIMESHARE + rtp->prio; break; case RTP_PRIO_IDLE: - kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio; + td->td_user_pri = PRI_MIN_IDLE + rtp->prio; break; default: return (EINVAL); } - sched_class(kg, rtp->type); - if (curthread->td_ksegrp == kg) { - sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */ - } + sched_class(td, rtp->type); /* XXX fix */ + if (curthread == td) + sched_prio(curthread, td->td_user_pri); /* XXX dubious */ return (0); } void -pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp) +pri_to_rtp(struct thread *td, struct rtprio *rtp) { mtx_assert(&sched_lock, MA_OWNED); - switch (PRI_BASE(kg->kg_pri_class)) { + switch (PRI_BASE(td->td_pri_class)) { case PRI_REALTIME: - rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME; + rtp->prio = td->td_user_pri - PRI_MIN_REALTIME; break; case PRI_TIMESHARE: - rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE; + rtp->prio = td->td_user_pri - PRI_MIN_TIMESHARE; break; case PRI_IDLE: - rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE; + rtp->prio = td->td_user_pri - PRI_MIN_IDLE; break; default: break; } - rtp->type = kg->kg_pri_class; + rtp->type = td->td_pri_class; } #if defined(COMPAT_43) ==== //depot/projects/bike_sched/sys/kern/kern_sig.c#2 (text+ko) ==== @@ -96,7 +96,6 @@ static struct thread *sigtd(struct proc *p, int sig, int prop); static int kern_sigtimedwait(struct thread *, sigset_t, ksiginfo_t *, struct timespec *); -static int do_tdsignal(struct proc *, struct thread *, int, ksiginfo_t *); static void sigqueue_start(void); static uma_zone_t ksiginfo_zone = NULL; @@ -570,7 +569,7 @@ signotify(struct thread *td) { struct proc *p; - sigset_t set, saved; + sigset_t set; p = td->td_proc; @@ -581,8 +580,6 @@ * previously masked by all threads to our sigqueue. */ set = p->p_sigqueue.sq_signals; - if (p->p_flag & P_SA) - saved = p->p_sigqueue.sq_signals; SIGSETNAND(set, td->td_sigmask); if (! 
SIGISEMPTY(set)) sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set); @@ -591,13 +588,6 @@ td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING; mtx_unlock_spin(&sched_lock); } - if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { - if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { - /* pending set changed */ - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } - } } int @@ -749,11 +739,6 @@ if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || (sigprop(sig) & SA_IGNORE && ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { - if ((p->p_flag & P_SA) && - SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) { - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } /* never to be seen again */ sigqueue_delete_proc(p, sig); if (sig != SIGCONT) @@ -1211,10 +1196,6 @@ continue; if (!SIGISMEMBER(td->td_sigqueue.sq_signals, i)) { if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) { - if (p->p_flag & P_SA) { - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } sigqueue_move(&p->p_sigqueue, &td->td_sigqueue, i); } else @@ -1887,7 +1868,6 @@ { struct sigacts *ps; struct proc *p; - int error; int sig; int code; @@ -1896,23 +1876,7 @@ code = ksi->ksi_code; KASSERT(_SIG_VALID(sig), ("invalid signal")); - if (td->td_pflags & TDP_SA) { - if (td->td_mailbox == NULL) - thread_user_enter(td); - PROC_LOCK(p); - SIGDELSET(td->td_sigmask, sig); - mtx_lock_spin(&sched_lock); - /* - * Force scheduling an upcall, so UTS has chance to - * process the signal before thread runs again in - * userland. - */ - if (td->td_upcall) - td->td_upcall->ku_flags |= KUF_DOUPCALL; - mtx_unlock_spin(&sched_lock); - } else { - PROC_LOCK(p); - } + PROC_LOCK(p); ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) && @@ -1923,27 +1887,8 @@ ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], &td->td_sigmask, code); #endif - if (!(td->td_pflags & TDP_SA)) - (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], + (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], ksi, &td->td_sigmask); - else if (td->td_mailbox == NULL) { - mtx_unlock(&ps->ps_mtx); - /* UTS caused a sync signal */ - p->p_code = code; /* XXX for core dump/debugger */ - p->p_sig = sig; /* XXX to verify code */ - sigexit(td, sig); - } else { - mtx_unlock(&ps->ps_mtx); - SIGADDSET(td->td_sigmask, sig); - PROC_UNLOCK(p); - error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig, - sizeof(siginfo_t)); - PROC_LOCK(p); - /* UTS memory corrupted */ - if (error) - sigexit(td, SIGSEGV); - mtx_lock(&ps->ps_mtx); - } SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); if (!SIGISMEMBER(ps->ps_signodefer, sig)) SIGADDSET(td->td_sigmask, sig); @@ -2057,25 +2002,6 @@ int tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) { - sigset_t saved; - int ret; - - if (p->p_flag & P_SA) - saved = p->p_sigqueue.sq_signals; - ret = do_tdsignal(p, td, sig, ksi); - if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { - if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { - /* pending set changed */ - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } - } - return (ret); -} - -static int -do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) -{ sig_t action; sigqueue_t *sigqueue; int prop; @@ -2086,9 +2012,9 @@ PROC_LOCK_ASSERT(p, MA_OWNED); if (!_SIG_VALID(sig)) - panic("do_tdsignal(): invalid signal"); + panic("tdsignal(): invalid signal"); - KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("do_tdsignal: ksi on queue")); + KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("tdsignal: ksi on queue")); /* * IEEE Std 1003.1-2001: 
return success when killing a zombie. @@ -2250,11 +2176,6 @@ /* * The process wants to catch it so it needs * to run at least one thread, but which one? - * It would seem that the answer would be to - * run an upcall in the next KSE to run, and - * deliver the signal that way. In a NON KSE - * process, we need to make sure that the - * single thread is runnable asap. * XXXKSE for now however, make them all run. */ goto runfast; @@ -2548,8 +2469,6 @@ */ if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) { sigqueue_delete(&td->td_sigqueue, sig); - if (td->td_pflags & TDP_SA) - SIGADDSET(td->td_sigmask, sig); continue; } if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { @@ -2560,9 +2479,6 @@ newsig = ptracestop(td, sig); mtx_lock(&ps->ps_mtx); - if (td->td_pflags & TDP_SA) - SIGADDSET(td->td_sigmask, sig); - if (sig != newsig) { ksiginfo_t ksi; /* @@ -2586,8 +2502,6 @@ * signal is being masked, look for other signals. */ SIGADDSET(td->td_sigqueue.sq_signals, sig); - if (td->td_pflags & TDP_SA) - SIGDELSET(td->td_sigmask, sig); if (SIGISMEMBER(td->td_sigmask, sig)) continue; signotify(td); @@ -2750,7 +2664,7 @@ mtx_lock(&ps->ps_mtx); } - if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) { + if (action == SIG_DFL) { /* * Default action, where the default is to kill * the process. (Other cases were ignored above.) @@ -2759,13 +2673,6 @@ sigexit(td, sig); /* NOTREACHED */ } else { - if (td->td_pflags & TDP_SA) { - if (sig == SIGKILL) { - mtx_unlock(&ps->ps_mtx); - sigexit(td, sig); - } - } - /* * If we get here, the signal must be caught. */ @@ -2808,10 +2715,7 @@ p->p_code = 0; p->p_sig = 0; } - if (td->td_pflags & TDP_SA) - thread_signal_add(curthread, &ksi); - else - (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); + (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); } } ==== //depot/projects/bike_sched/sys/kern/kern_subr.c#2 (text+ko) ==== @@ -430,7 +430,7 @@ td = curthread; mtx_lock_spin(&sched_lock); DROP_GIANT(); - sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */ + sched_prio(td, td->td_user_pri); mi_switch(SW_INVOL, NULL); mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); ==== //depot/projects/bike_sched/sys/kern/kern_switch.c#2 (text+ko) ==== @@ -24,67 +24,6 @@ * SUCH DAMAGE. */ -/*** -Here is the logic.. - -If there are N processors, then there are at most N KSEs (kernel -schedulable entities) working to process threads that belong to a -KSEGROUP (kg). If there are X of these KSEs actually running at the -moment in question, then there are at most M (N-X) of these KSEs on -the run queue, as running KSEs are not on the queue. - -Runnable threads are queued off the KSEGROUP in priority order. -If there are M or more threads runnable, the top M threads -(by priority) are 'preassigned' to the M KSEs not running. The KSEs take -their priority from those threads and are put on the run queue. - -The last thread that had a priority high enough to have a KSE associated -with it, AND IS ON THE RUN QUEUE is pointed to by -kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs -assigned as all the available KSEs are activly running, or because there -are no threads queued, that pointer is NULL. - -When a KSE is removed from the run queue to become runnable, we know -it was associated with the highest priority thread in the queue (at the head -of the queue). If it is also the last assigned we know M was 1 and must -now be 0. Since the thread is no longer queued that pointer must be -removed from it. 
Since we know there were no more KSEs available, -(M was 1 and is now 0) and since we are not FREEING our KSE -but using it, we know there are STILL no more KSEs available, we can prove -that the next thread in the ksegrp list will not have a KSE to assign to -it, so we can show that the pointer must be made 'invalid' (NULL). - -The pointer exists so that when a new thread is made runnable, it can -have its priority compared with the last assigned thread to see if -it should 'steal' its KSE or not.. i.e. is it 'earlier' -on the list than that thread or later.. If it's earlier, then the KSE is -removed from the last assigned (which is now not assigned a KSE) -and reassigned to the new thread, which is placed earlier in the list. -The pointer is then backed up to the previous thread (which may or may not -be the new thread). - -When a thread sleeps or is removed, the KSE becomes available and if there -are queued threads that are not assigned KSEs, the highest priority one of -them is assigned the KSE, which is then placed back on the run queue at -the approipriate place, and the kg->kg_last_assigned pointer is adjusted down -to point to it. - -The following diagram shows 2 KSEs and 3 threads from a single process. - - RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads) - \ \____ - \ \ - KSEGROUP---thread--thread--thread (queued in priority order) - \ / - \_______________/ - (last_assigned) - -The result of this scheme is that the M available KSEs are always -queued at the priorities they have inherrited from the M highest priority -threads for that KSEGROUP. If this situation changes, the KSEs are -reassigned to keep this true. -***/ - #include <sys/cdefs.h> __FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.121 2006/06/01 22:45:56 cognet Exp $"); @@ -143,51 +82,35 @@ * Functions that manipulate runnability from a thread perspective. * ************************************************************************/ /* - * Select the KSE that will be run next. From that find the thread, and - * remove it from the KSEGRP's run queue. If there is thread clustering, - * this will be what does it. + * Select the thread that will be run next. */ struct thread * choosethread(void) { >>> TRUNCATED FOR MAIL (1000 lines) <<<
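For anyone reproducing the workaround mentioned in the change description above, a minimal /etc/libmap.conf sketch follows. It simply maps the KSE-based libpthread onto the 1:1 libthr for all binaries (a global mapping; see libmap.conf(5)), so threaded programs keep running while the kse_* syscalls stubbed out to nosys in this branch are never needed. The library version numbers are taken from the description and may differ on other releases.

    # /etc/libmap.conf
    # Redirect consumers of the KSE-based libpthread to the 1:1 libthr,
    # since this branch turns the kse_* system calls into nosys stubs.
    libpthread.so.1		libthr.so.1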