Date: Sun, 5 Sep 2004 22:29:46 GMT
From: Julian Elischer <julian@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 61064 for review
Message-ID: <200409052229.i85MTkfo048414@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=61064

Change 61064 by julian@julian_ref on 2004/09/05 22:29:38

	Remerge with -current

Affected files ...

.. //depot/projects/nsched/sys/alpha/alpha/machdep.c#5 integrate
.. //depot/projects/nsched/sys/amd64/amd64/machdep.c#9 integrate
.. //depot/projects/nsched/sys/arm/sa11x0/assabet_machdep.c#4 integrate
.. //depot/projects/nsched/sys/cam/scsi/scsi_cd.c#2 integrate
.. //depot/projects/nsched/sys/conf/files#23 integrate
.. //depot/projects/nsched/sys/ddb/db_ps.c#6 integrate
.. //depot/projects/nsched/sys/dev/usb/ucycom.c#1 branch
.. //depot/projects/nsched/sys/geom/geom_subr.c#4 integrate
.. //depot/projects/nsched/sys/geom/notes#2 integrate
.. //depot/projects/nsched/sys/i386/i386/machdep.c#14 integrate
.. //depot/projects/nsched/sys/ia64/ia64/machdep.c#6 integrate
.. //depot/projects/nsched/sys/isofs/cd9660/cd9660_vnops.c#3 integrate
.. //depot/projects/nsched/sys/kern/init_main.c#13 integrate
.. //depot/projects/nsched/sys/kern/kern_exec.c#11 integrate
.. //depot/projects/nsched/sys/kern/kern_exit.c#18 integrate
.. //depot/projects/nsched/sys/kern/kern_fork.c#12 integrate
.. //depot/projects/nsched/sys/kern/kern_intr.c#8 integrate
.. //depot/projects/nsched/sys/kern/kern_kse.c#27 integrate
.. //depot/projects/nsched/sys/kern/kern_proc.c#12 integrate
.. //depot/projects/nsched/sys/kern/kern_switch.c#10 integrate
.. //depot/projects/nsched/sys/kern/kern_synch.c#10 integrate
.. //depot/projects/nsched/sys/kern/kern_thr.c#14 integrate
.. //depot/projects/nsched/sys/kern/kern_thread.c#34 integrate
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#36 integrate
.. //depot/projects/nsched/sys/kern/sched_ule.c#23 integrate
.. //depot/projects/nsched/sys/kern/uipc_socket.c#13 integrate
.. //depot/projects/nsched/sys/modules/Makefile#11 integrate
.. //depot/projects/nsched/sys/modules/ucycom/Makefile#1 branch
.. //depot/projects/nsched/sys/netinet/ip_divert.c#8 integrate
.. //depot/projects/nsched/sys/netinet/ip_fw2.c#10 integrate
.. //depot/projects/nsched/sys/netinet/raw_ip.c#7 integrate
.. //depot/projects/nsched/sys/netinet/tcp_output.c#7 integrate
.. //depot/projects/nsched/sys/netinet/tcp_subr.c#8 integrate
.. //depot/projects/nsched/sys/netinet/udp_usrreq.c#7 integrate
.. //depot/projects/nsched/sys/netinet6/nd6.c#4 integrate
.. //depot/projects/nsched/sys/pc98/i386/machdep.c#9 integrate
.. //depot/projects/nsched/sys/powerpc/powerpc/machdep.c#5 integrate
.. //depot/projects/nsched/sys/sparc64/sparc64/machdep.c#6 integrate
.. //depot/projects/nsched/sys/sys/proc.h#25 integrate
.. //depot/projects/nsched/sys/sys/sched.h#13 integrate

Differences ...
==== //depot/projects/nsched/sys/alpha/alpha/machdep.c#5 (text+ko) ==== @@ -88,7 +88,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/alpha/alpha/machdep.c,v 1.222 2004/07/10 22:35:05 marcel Exp $"); +__FBSDID("$FreeBSD: src/sys/alpha/alpha/machdep.c,v 1.223 2004/09/05 02:09:51 julian Exp $"); #include "opt_compat.h" #include "opt_ddb.h" ==== //depot/projects/nsched/sys/amd64/amd64/machdep.c#9 (text+ko) ==== @@ -39,7 +39,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/amd64/amd64/machdep.c,v 1.619 2004/08/24 00:16:43 peter Exp $"); +__FBSDID("$FreeBSD: src/sys/amd64/amd64/machdep.c,v 1.620 2004/09/05 02:09:52 julian Exp $"); #include "opt_atalk.h" #include "opt_atpic.h" ==== //depot/projects/nsched/sys/arm/sa11x0/assabet_machdep.c#4 (text+ko) ==== @@ -47,7 +47,7 @@ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/arm/sa11x0/assabet_machdep.c,v 1.2 2004/06/17 17:52:12 cognet Exp $"); +__FBSDID("$FreeBSD: src/sys/arm/sa11x0/assabet_machdep.c,v 1.3 2004/09/05 02:09:52 julian Exp $"); #define _ARM32_BUS_DMA_PRIVATE #include <sys/param.h> ==== //depot/projects/nsched/sys/cam/scsi/scsi_cd.c#2 (text+ko) ==== @@ -46,7 +46,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/cam/scsi/scsi_cd.c,v 1.89 2004/02/18 21:36:50 phk Exp $"); +__FBSDID("$FreeBSD: src/sys/cam/scsi/scsi_cd.c,v 1.90 2004/09/05 21:15:58 phk Exp $"); #include "opt_cd.h" @@ -2723,7 +2723,7 @@ cdprevent(periph, PR_PREVENT); softc->disk->d_maxsize = DFLTPHYS; - softc->disk->d_sectorsize = 0; + softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; /* ==== //depot/projects/nsched/sys/conf/files#23 (text+ko) ==== @@ -1,4 +1,4 @@ -# $FreeBSD: src/sys/conf/files,v 1.949 2004/09/02 20:44:56 alfred Exp $ +# $FreeBSD: src/sys/conf/files,v 1.951 2004/09/05 09:43:47 des Exp $ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and @@ -800,6 +800,7 @@ dev/usb/ubsa.c optional ubsa ucom dev/usb/ubser.c optional ubser dev/usb/ucom.c optional ucom +dev/usb/ucycom.c optional ucycom ucom dev/usb/udbp.c optional udbp dev/usb/ufm.c optional ufm dev/usb/uftdi.c optional uftdi ucom ==== //depot/projects/nsched/sys/ddb/db_ps.c#6 (text+ko) ==== @@ -28,7 +28,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/ddb/db_ps.c,v 1.52 2004/07/10 23:47:19 marcel Exp $"); +__FBSDID("$FreeBSD: src/sys/ddb/db_ps.c,v 1.53 2004/09/05 02:09:52 julian Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -100,7 +100,7 @@ p->p_ucred != NULL ? p->p_ucred->cr_ruid : 0, pp->p_pid, p->p_pgrp != NULL ? 
p->p_pgrp->pg_id : 0, p->p_flag, state); - if (p->p_flag & P_SA) + if (p->p_flag & P_HADTHREADS) db_printf("(threaded) %s\n", p->p_comm); FOREACH_THREAD_IN_PROC(p, td) { dumpthread(p, td); @@ -120,7 +120,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td) { - if (p->p_flag & P_SA) + if (p->p_flag & P_HADTHREADS) db_printf( " thread %p ksegrp %p ", td, td->td_ksegrp); if (TD_ON_SLEEPQ(td)) db_printf("[SLPQ %s %p]", td->td_wmesg, (void *)td->td_wchan); @@ -159,12 +159,11 @@ default: db_printf("[UNK: %#x]", td->td_state); } - if (p->p_flag & P_SA) { - /* - if (sched_fairness_print) { - (*sched_fairness_print)(td); - } - */ + if (p->p_flag & P_HADTHREADS) { +#ifdef KEF_DIDRUN + if (td->td_kse) + db_printf("[kse %p]", td->td_kse); +#endif db_printf("\n"); } else db_printf(" %s\n", p->p_comm); ==== //depot/projects/nsched/sys/geom/geom_subr.c#4 (text+ko) ==== @@ -34,7 +34,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/geom/geom_subr.c,v 1.81 2004/08/08 08:34:46 phk Exp $"); +__FBSDID("$FreeBSD: src/sys/geom/geom_subr.c,v 1.82 2004/09/05 21:15:58 phk Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -706,6 +706,9 @@ cp->acr += dcr; cp->acw += dcw; cp->ace += dce; + if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) + KASSERT(pp->sectorsize > 0, + ("Provider %s lacks sectorsize", pp->name)); } return (error); } ==== //depot/projects/nsched/sys/geom/notes#2 (text+ko) ==== @@ -1,4 +1,4 @@ -$FreeBSD: src/sys/geom/notes,v 1.4 2003/03/23 10:08:13 phk Exp $ +$FreeBSD: src/sys/geom/notes,v 1.5 2004/09/05 21:15:58 phk Exp $ For the lack of a better place to put them, this file will contain notes on some of the more intricate details of geom. @@ -138,3 +138,13 @@ geom_slice.h is special in that it documents a "library" for implementing a specific kind of class, and consequently does not appear in the above matrix. +----------------------------------------------------------------------- +Removable media. + +In general, the theory is that a drive creates the provider when it has +a media and destroys it when the media disappears. + +In a more realistic world, we will allow a provider to be opened medialess +(set any sectorsize and a mediasize==0) in order to allow operations like +open/close tray etc. + ==== //depot/projects/nsched/sys/i386/i386/machdep.c#14 (text+ko) ==== @@ -38,7 +38,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/i386/i386/machdep.c,v 1.597 2004/08/05 00:32:08 rwatson Exp $"); +__FBSDID("$FreeBSD: src/sys/i386/i386/machdep.c,v 1.598 2004/09/05 02:09:52 julian Exp $"); #include "opt_apic.h" #include "opt_atalk.h" ==== //depot/projects/nsched/sys/ia64/ia64/machdep.c#6 (text+ko) ==== @@ -24,7 +24,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.185 2004/08/16 18:54:22 marcel Exp $ + * $FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.186 2004/09/05 02:09:53 julian Exp $ */ #include "opt_compat.h" ==== //depot/projects/nsched/sys/isofs/cd9660/cd9660_vnops.c#3 (text+ko) ==== @@ -35,7 +35,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/isofs/cd9660/cd9660_vnops.c,v 1.98 2004/04/07 20:46:09 imp Exp $"); +__FBSDID("$FreeBSD: src/sys/isofs/cd9660/cd9660_vnops.c,v 1.99 2004/09/05 11:18:53 tjr Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -167,14 +167,7 @@ struct iso_node *ip = VTOI(vp); vap->va_fsid = dev2udev(ip->i_dev); - - /* - * Don't use ip->i_ino for this since it is wrong for hard links. 
- * ip->i_ino should be the same as ip->iso_start (or not exist), - * but this currently doesn't work since we abuse it to look up - * parent directories from inodes. - */ - vap->va_fileid = ip->iso_start; + vap->va_fileid = ip->i_number; vap->va_mode = ip->inode.iso_mode; vap->va_nlink = ip->inode.iso_links; @@ -528,12 +521,11 @@ break; } - /* - * The "inode number" is iso_start, not i_ino, as in - * cd9660_getattr(). - */ - idp->current.d_fileno = isonum_711(ep->ext_attr_length) + - isonum_733(ep->extent); + if (isonum_711(ep->flags)&2) + idp->current.d_fileno = isodirino(ep, imp); + else + idp->current.d_fileno = dbtob(bp->b_blkno) + + entryoffsetinblock; idp->curroff += reclen; ==== //depot/projects/nsched/sys/kern/init_main.c#13 (text+ko) ==== @@ -42,7 +42,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/init_main.c,v 1.247 2004/09/01 02:11:27 julian Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/init_main.c,v 1.248 2004/09/05 02:09:53 julian Exp $"); #include "opt_init_path.h" #include "opt_mac.h" @@ -335,14 +335,12 @@ */ procinit(); /* set up proc zone */ threadinit(); /* set up thead, upcall and KSEGRP zones */ - kseinit(); /* set up kse specific stuff e.g. upcall zone*/ /* * Initialise scheduler resources. * Add scheduler specific parts to proc, ksegrp, thread as needed. */ schedinit(); /* scheduler gets its house in order */ - schedinit2(); /* temporary */ /* * Initialize sleep queue hash table */ ==== //depot/projects/nsched/sys/kern/kern_exec.c#11 (text+ko) ==== @@ -25,7 +25,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_exec.c,v 1.249 2004/08/15 06:24:41 jmg Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_exec.c,v 1.250 2004/09/05 02:09:53 julian Exp $"); #include "opt_ktrace.h" #include "opt_mac.h" ==== //depot/projects/nsched/sys/kern/kern_exit.c#18 (text+ko) ==== @@ -35,7 +35,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_exit.c,v 1.245 2004/08/15 06:24:41 jmg Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_exit.c,v 1.246 2004/09/05 02:09:53 julian Exp $"); #include "opt_compat.h" #include "opt_ktrace.h" ==== //depot/projects/nsched/sys/kern/kern_fork.c#12 (text+ko) ==== @@ -35,7 +35,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_fork.c,v 1.237 2004/09/03 05:11:32 alc Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_fork.c,v 1.238 2004/09/05 02:09:53 julian Exp $"); #include "opt_ktrace.h" #include "opt_mac.h" @@ -783,7 +783,7 @@ mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); cpu_critical_fork_exit(); CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)", - td, td->td_kse, p->p_pid, p->p_comm); + td, td->td_sched, p->p_pid, p->p_comm); /* * Processes normally resume in mi_switch() after being ==== //depot/projects/nsched/sys/kern/kern_intr.c#8 (text+ko) ==== @@ -25,7 +25,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_intr.c,v 1.114 2004/09/01 02:11:27 julian Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_intr.c,v 1.115 2004/09/05 02:09:53 julian Exp $"); #include "opt_ddb.h" @@ -443,7 +443,7 @@ *ithdp = ithd; } return (ithread_add_handler(ithd, name, handler, arg, - (pri /** RQ_PPQ*/) + PI_SOFT, flags, cookiep)); + (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); /* XXKSE.. 
think of a better way to get separate queues */ } ==== //depot/projects/nsched/sys/kern/kern_kse.c#27 (text+ko) ==== @@ -27,7 +27,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_kse.c,v 1.199 2004/09/01 02:11:27 julian Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_kse.c,v 1.200 2004/09/05 02:09:53 julian Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -347,7 +347,7 @@ * The other possibility would be to let the process exit. */ p->p_flag &= ~(P_SA|P_HADTHREADS); - sched_reset_concurrency(td->td_ksegrp); + sched_set_concurrency(td->td_ksegrp, 1); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); #if 1 @@ -499,7 +499,7 @@ * No new KSEG: first call: use current KSE, don't schedule an upcall * All other situations, do allocate max new KSEs and schedule an upcall. * - * XXX should be changed so that 'first' behaviour lasts for as long + * XXX should be changed so that 'first' behaviour lasts for as long * as you have not made a kse in this ksegrp. i.e. as long as we do not have * a mailbox.. */ @@ -1047,7 +1047,6 @@ td2->td_upcall = ku; td2->td_flags = 0; td2->td_pflags = TDP_SA|TDP_UPCALLING; - td2->td_kse = NULL; td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; SIGFILLSET(td2->td_sigmask); @@ -1085,9 +1084,9 @@ PROC_LOCK(p); mtx_lock(&ps->ps_mtx); } - -void -thread_switchout(struct thread *td) +#include "opt_sched.h" +struct thread * +thread_switchout(struct thread *td, int flags, struct thread *nextthread) { struct kse_upcall *ku; struct thread *td2; @@ -1123,8 +1122,20 @@ td->td_upcall = NULL; td->td_pflags &= ~TDP_CAN_UNBIND; td2 = thread_schedule_upcall(td, ku); +#ifdef SCHED_4BSD + if (flags & SW_INVOL || nextthread) { + setrunqueue(td2, SRQ_YIELDING); + } else { + /* Keep up with reality.. we have one extra thread + * in the picture.. and it's 'running'. + */ + return td2; + } +#else setrunqueue(td2, SRQ_YIELDING); +#endif } + return (nextthread); } /* ==== //depot/projects/nsched/sys/kern/kern_proc.c#12 (text+ko) ==== @@ -27,11 +27,11 @@ * SUCH DAMAGE. * * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95 - * $FreeBSD: src/sys/kern/kern_proc.c,v 1.215 2004/08/14 17:15:16 rwatson Exp $ + * $FreeBSD: src/sys/kern/kern_proc.c,v 1.216 2004/09/05 02:09:53 julian Exp $ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_proc.c,v 1.215 2004/08/14 17:15:16 rwatson Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_proc.c,v 1.216 2004/09/05 02:09:53 julian Exp $"); #include "opt_ktrace.h" #include "opt_kstack_pages.h" @@ -143,7 +143,9 @@ { struct proc *p; struct thread *td; +#ifdef INVARIANTS struct ksegrp *kg; +#endif /* INVARIANTS checks go here */ p = (struct proc *)mem; @@ -760,13 +762,8 @@ kp->ki_kstack = (void *)td->td_kstack; kp->ki_pctcpu = sched_pctcpu(td); - /* Things in the kse */ -#if 0 - if (ke) - kp->ki_rqindex = ke->ke_rqindex; - else -#endif - kp->ki_rqindex = 0; + /* We can't get this anymore but ps etc never used it anyway. */ + kp->ki_rqindex = 0; } else { kp->ki_stat = SZOMB; ==== //depot/projects/nsched/sys/kern/kern_switch.c#10 (text+ko) ==== @@ -86,7 +86,7 @@ ***/ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.85 2004/09/02 23:37:41 julian Exp $"); +__FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.86 2004/09/05 02:09:53 julian Exp $"); #include "opt_sched.h" @@ -118,6 +118,8 @@ CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); +#define td_kse td_sched + /************************************************************************ * Functions that manipulate runnability from a thread perspective. 
* ************************************************************************/ @@ -151,7 +153,7 @@ td = ke->ke_thread; KASSERT((td->td_kse == ke), ("kse/thread mismatch")); kg = ke->ke_ksegrp; - if (td->td_proc->p_flag & P_SA) { + if (td->td_proc->p_flag & P_HADTHREADS) { if (kg->kg_last_assigned == td) { kg->kg_last_assigned = TAILQ_PREV(td, threadqueue, td_runq); @@ -185,52 +187,41 @@ } /* - * Given a surplus KSE, either assign a new runable thread to it - * (and put it in the run queue) or put it in the ksegrp's idle KSE list. - * Assumes that the original thread is not runnable. + * Given a surplus system slot, try assign a new runnable thread to it. + * Called from: + * sched_thread_exit() (local) + * sched_switch() (local) + * sched_thread_exit() (local) + * remrunqueue() (local) (commented out) */ -void -kse_reassign(struct kse *ke) +static void +slot_fill(struct ksegrp *kg) { - struct ksegrp *kg; struct thread *td; - struct thread *original; mtx_assert(&sched_lock, MA_OWNED); - original = ke->ke_thread; - KASSERT(original == NULL || TD_IS_INHIBITED(original), - ("reassigning KSE with runnable thread")); - kg = ke->ke_ksegrp; - if (original) - original->td_kse = NULL; + while (kg->kg_avail_opennings > 0) { + /* + * Find the first unassigned thread + */ + if ((td = kg->kg_last_assigned) != NULL) + td = TAILQ_NEXT(td, td_runq); + else + td = TAILQ_FIRST(&kg->kg_runq); - /* - * Find the first unassigned thread - */ - if ((td = kg->kg_last_assigned) != NULL) - td = TAILQ_NEXT(td, td_runq); - else - td = TAILQ_FIRST(&kg->kg_runq); - - /* - * If we found one, assign it the kse, otherwise idle the kse. - */ - if (td) { - kg->kg_last_assigned = td; - td->td_kse = ke; - ke->ke_thread = td; - CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td); - sched_add(td, SRQ_BORING); - return; + /* + * If we found one, send it to the system scheduler. + */ + if (td) { + kg->kg_last_assigned = td; + kg->kg_avail_opennings--; + sched_add(td, SRQ_BORING); + CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg); + } else { + /* no threads to use up the slots. quit now */ + break; + } } - - ke->ke_state = KES_IDLE; - ke->ke_thread = NULL; - TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses++; - CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke); - sched_check_concurrency(kg); /* could implement directly */ - return; } #if 0 @@ -256,16 +247,17 @@ /* * If it is not a threaded process, take the shortcut. */ - if ((td->td_proc->p_flag & P_SA) == 0) { + if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* Bring its kse with it, leave the thread attached */ sched_rem(td); + kg->kg_avail_opennings++; ke->ke_state = KES_THREAD; return; } td3 = TAILQ_PREV(td, threadqueue, td_runq); TAILQ_REMOVE(&kg->kg_runq, td, td_runq); kg->kg_runnable--; - if (ke) { + if (ke->ke_state == KES_ONRUNQ) { /* * This thread has been assigned to a KSE. * We need to dissociate it and try assign the @@ -273,12 +265,13 @@ * see if we need to move the KSE in the run queues. */ sched_rem(td); + kg->kg_avail_opennings++; ke->ke_state = KES_THREAD; td2 = kg->kg_last_assigned; KASSERT((td2 != NULL), ("last assigned has wrong value")); if (td2 == td) kg->kg_last_assigned = td3; - kse_reassign(ke); + slot_fill(kg); } } #endif @@ -300,7 +293,7 @@ /* * If it is not a threaded process, take the shortcut. */ - if ((td->td_proc->p_flag & P_SA) == 0) { + if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* We only care about the kse in the run queue. 
*/ td->td_priority = newpri; if (ke->ke_rqindex != (newpri / RQ_PPQ)) { @@ -313,77 +306,67 @@ /* It is a threaded process */ kg = td->td_ksegrp; TD_SET_CAN_RUN(td); - if (ke) { + if (ke->ke_state == KES_ONRUNQ) { if (kg->kg_last_assigned == td) { kg->kg_last_assigned = TAILQ_PREV(td, threadqueue, td_runq); } sched_rem(td); + kg->kg_avail_opennings++; } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); kg->kg_runnable--; td->td_priority = newpri; setrunqueue(td, SRQ_BORING); } - +int limitcount; void setrunqueue(struct thread *td, int flags) { - struct kse *ke; struct ksegrp *kg; struct thread *td2; struct thread *tda; int count; - CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d", - td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid); + CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d", + td, td->td_ksegrp, td->td_proc->p_pid); mtx_assert(&sched_lock, MA_OWNED); KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), ("setrunqueue: bad thread state")); TD_SET_RUNQ(td); kg = td->td_ksegrp; - if ((td->td_proc->p_flag & P_SA) == 0) { + if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* * Common path optimisation: Only one of everything * and the KSE is always already attached. * Totally ignore the ksegrp run queue. */ + if (kg->kg_avail_opennings != 1) { + if (limitcount < 100) { + limitcount++; + printf("pid %d: bad slot count (%d)\n", + td->td_proc->p_pid, kg->kg_avail_opennings); + + } + kg->kg_avail_opennings = 1; + } + kg->kg_avail_opennings--; sched_add(td, flags); return; } tda = kg->kg_last_assigned; - if ((ke = td->td_kse) == NULL) { - if (kg->kg_idle_kses) { - /* - * There is a free one so it's ours for the asking.. - */ - ke = TAILQ_FIRST(&kg->kg_iq); - CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p", - kg, ke); - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - ke->ke_state = KES_THREAD; - kg->kg_idle_kses--; - } else if (tda && (tda->td_priority > td->td_priority)) { - /* - * None free, but there is one we can commandeer. - */ - ke = tda->td_kse; - CTR3(KTR_RUNQ, - "setrunqueue: kg:%p: take ke:%p from td: %p", - kg, ke, tda); - sched_rem(tda); - tda->td_kse = NULL; - ke->ke_thread = NULL; - tda = kg->kg_last_assigned = - TAILQ_PREV(tda, threadqueue, td_runq); - } - } else { - /* - * Temporarily disassociate so it looks like the other cases. + if ((kg->kg_avail_opennings <= 0) && + (tda && (tda->td_priority > td->td_priority))) { + /* + * None free, but there is one we can commandeer. */ - ke->ke_thread = NULL; - td->td_kse = NULL; + CTR2(KTR_RUNQ, + "setrunqueue: kg:%p: take slot from td: %p", kg, tda); + sched_rem(tda); + tda = kg->kg_last_assigned = + TAILQ_PREV(tda, threadqueue, td_runq); + kg->kg_avail_opennings++; } /* @@ -410,40 +393,30 @@ } /* - * If we have a ke to use, then put it on the run queue and - * If needed, readjust the last_assigned pointer. + * If we have a slot to use, then put the thread on the system + * run queue and if needed, readjust the last_assigned pointer. */ - if (ke) { + if (kg->kg_avail_opennings > 0) { if (tda == NULL) { /* * No pre-existing last assigned so whoever is first * gets the KSE we brought in.. (maybe us) */ td2 = TAILQ_FIRST(&kg->kg_runq); - KASSERT((td2->td_kse == NULL), - ("unexpected ke present")); - td2->td_kse = ke; - ke->ke_thread = td2; kg->kg_last_assigned = td2; } else if (tda->td_priority > td->td_priority) { - /* - * It's ours, grab it, but last_assigned is past us - * so don't change it. 
- */ - td->td_kse = ke; - ke->ke_thread = td; + td2 = td; } else { /* * We are past last_assigned, so - * put the new kse on whatever is next, + * gave the next slot to whatever is next, * which may or may not be us. */ td2 = TAILQ_NEXT(tda, td_runq); kg->kg_last_assigned = td2; - td2->td_kse = ke; - ke->ke_thread = td2; } - sched_add(ke->ke_thread, flags); + kg->kg_avail_opennings--; + sched_add(td2, flags); } else { CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d", td, td->td_ksegrp, td->td_proc->p_pid); @@ -695,7 +668,6 @@ #if defined(SMP) && defined(SCHED_4BSD) int runq_fuzz = 1; -SYSCTL_DECL(_kern_sched); SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, ""); #endif @@ -772,121 +744,13 @@ /****** functions that are temporarily here ***********/ #include <vm/uma.h> #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) -static uma_zone_t kse_zone; -TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); extern struct mtx kse_zombie_lock; /* - * Initialize type-stable parts of a kse (when newly created). - */ -static int -kse_init(void *mem, int size, int flags) -{ - struct kse *ke; - - ke = (struct kse *)mem; - ke->ke_sched = (struct ke_sched *)&ke[1]; - return (0); -} - -/* - * Allocate a kse. - */ -static struct kse * -kse_alloc(void) -{ - return (uma_zalloc(kse_zone, M_WAITOK)); -} - -/* - * Deallocate a kse. - */ -void -kse_free(struct kse *td) -{ - uma_zfree(kse_zone, td); -} - -/* - * KSE is linked into kse group. - * If we know the thread at this time attach to it, - * otherwise put it on the idle kse queue. - * Called from: - * sched_init_concurrency() schedlock - * sched_set_concurrency() schedlock - * schedinit2() NO schedlock (too early) - */ -static void -kse_link(struct kse *ke, struct ksegrp *kg, struct thread *td) -{ - struct proc *p = kg->kg_proc; - - TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); - kg->kg_kses++; - ke->ke_proc = p; - ke->ke_ksegrp = kg; - ke->ke_oncpu = NOCPU; - ke->ke_flags = 0; - if (td) { - ke->ke_state = KES_THREAD; - td->td_kse = ke; - ke->ke_thread = td; - } else { - TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses++; - ke->ke_state = KES_IDLE; - ke->ke_thread = NULL; - } -} - -/* - * Stash an embarasingly extra kse into the zombie kse queue. - */ -static void -kse_stash(struct kse *ke) -{ - mtx_lock_spin(&kse_zombie_lock); - TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); - mtx_unlock_spin(&kse_zombie_lock); -} - -/* - * Called from: - * sched_set_concurrency() - * sched_reset_concurrency() - * sched_check_concurrency() - */ -static void -kse_unlink(struct kse *ke) -{ - struct ksegrp *kg; - - mtx_assert(&sched_lock, MA_OWNED); - kg = ke->ke_ksegrp; - TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); - if (ke->ke_state == KES_IDLE) { - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses--; - } - --kg->kg_kses; - /* - * Aggregate stats from the KSE - */ - kse_stash(ke); -} - - -/* - * Concurrency is implemented using the number of KSEs. - * This will be re-implmented using another method, so - * isolate the details with a simple API. - * Once the API has been implemented, we can switch out the - * underlying implementation. - */ - -/* * Allocate scheduler specific per-process resources. * The thread and ksegrp have already been linked in. + * In this case just set the default concurrency value. 
+ * * Called from: * proc_init() (UMA init method) */ @@ -894,7 +758,8 @@ sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td) { - sched_init_concurrency(kg, td); + /* This can go in sched_fork */ + sched_init_concurrency(kg); } /* @@ -907,210 +772,84 @@ void sched_destroyproc(struct proc *p) { - struct ksegrp *kg; - + + /* this function slated for destruction */ KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread ")); KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp ")); - kg = FIRST_KSEGRP_IN_PROC(p); - KASSERT((kg->kg_kses == 1), ("Cached proc with > 1 kse ")); - kse_free(TAILQ_FIRST(&kg->kg_kseq)); } +#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) /* * thread is being either created or recycled. * Fix up the per-scheduler resources associated with it. * Called from: - * thread_dtor() - * thread_init() + * sched_fork_thread() + * thread_dtor() (*may go away) + * thread_init() (*may go away) */ void sched_newthread(struct thread *td) { - td->td_last_kse = NULL; - td->td_kse = NULL; + struct td_sched *ke; + + ke = (struct td_sched *) (td + 1); + bzero(ke, sizeof(*ke)); + td->td_sched = ke; + ke->ke_thread = td; + ke->ke_oncpu = NOCPU; + ke->ke_state = KES_THREAD; } /* - * (Re) assign resources to allow the ksegrp to implement - * the requested concurrency. At this time it means allocating - * or freeing KSE structures. - * We may not remove all the KSEs if there are enough threads in the - * ksegrp to justify them. They will eventually go away as they are added - * to the free kse queue and threads exit. - */ - -/* - * set up an initial concurrency of 1 - * and set the given thread (if given) to be using that + * Set up an initial concurrency of 1 + * and set the given thread (if given) to be using that * concurrency slot. * May be used "offline"..before the ksegrp is attached to the world * and thus wouldn't need schedlock in that case. >>> TRUNCATED FOR MAIL (1000 lines) <<<
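
The kern_switch.c and kern_kse.c hunks above are the heart of this remerge: the per-thread KSE assignment (kse_reassign(), td->td_kse, the idle-KSE list) is replaced by a per-ksegrp count of available slots (kg_avail_opennings) that slot_fill() drains onto the system run queue. The user-space sketch below models only that accounting; the types thread_model/ksegrp_model and the helpers group_enqueue()/slot_fill_model() are invented stand-ins for illustration, not the kernel structures or API.

/*
 * Minimal user-space model of the slot accounting introduced above:
 * a ksegrp owns a fixed concurrency (number of slots); threads queue
 * on the group in priority order, and slot_fill() hands out slots to
 * the first unassigned threads until the slots run out.
 */
#include <stdio.h>
#include <stdlib.h>

struct thread_model {
	int			tid;
	int			priority;	/* lower value = more urgent */
	struct thread_model	*next;
};

struct ksegrp_model {
	int			avail_openings;	/* unclaimed slots */
	int			concurrency;	/* total slots, cf. sched_set_concurrency() */
	struct thread_model	*runq;		/* priority-ordered waiters */
};

/* Insert a thread into the group's queue, keeping it sorted by priority. */
static void
group_enqueue(struct ksegrp_model *kg, struct thread_model *td)
{
	struct thread_model **tpp = &kg->runq;

	while (*tpp != NULL && (*tpp)->priority <= td->priority)
		tpp = &(*tpp)->next;
	td->next = *tpp;
	*tpp = td;
}

/* Hand free slots to the first unassigned threads, as slot_fill() does. */
static void
slot_fill_model(struct ksegrp_model *kg)
{
	struct thread_model *td;

	while (kg->avail_openings > 0 && (td = kg->runq) != NULL) {
		kg->runq = td->next;
		kg->avail_openings--;
		printf("dispatch tid %d (pri %d), %d slot(s) left\n",
		    td->tid, td->priority, kg->avail_openings);
		free(td);
	}
}

int
main(void)
{
	struct ksegrp_model kg = { .avail_openings = 2, .concurrency = 2 };
	int pri[] = { 30, 10, 20, 40 };

	for (int i = 0; i < 4; i++) {
		struct thread_model *td = calloc(1, sizeof(*td));
		td->tid = i + 1;
		td->priority = pri[i];
		group_enqueue(&kg, td);
	}
	slot_fill_model(&kg);	/* only the two best-priority threads run */

	/* Drain whatever did not get a slot (tids 1 and 4 in this run). */
	while (kg.runq != NULL) {
		struct thread_model *td = kg.runq;
		kg.runq = td->next;
		free(td);
	}
	return (0);
}

The point of the design change this models: concurrency becomes a counter that setrunqueue()/slot_fill() debit and the remove/exit paths credit, so no KSE structures have to be allocated, idled, or stashed on a zombie list, which is exactly the kse_alloc()/kse_link()/kse_stash() machinery the diff deletes.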
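A second, smaller sketch covers the removable-media thread running through scsi_cd.c, geom_subr.c and geom/notes above: a provider may now be opened with no media (mediasize == 0) as long as it advertises a non-zero sector size, which is why scsi_cd defaults d_sectorsize to 2048 and the access-count code in geom_subr.c grows a KASSERT. The model below is a toy, not GEOM's API; provider_model and access_model() are invented names.

/*
 * Toy model of the access-time invariant added in geom_subr.c above:
 * once a consumer holds read/write/exclusive access, the provider must
 * advertise a non-zero sector size, even with no media loaded
 * (mediasize == 0).
 */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

struct provider_model {
	const char	*name;
	unsigned	sectorsize;	/* bytes; must be > 0 once opened */
	off_t		mediasize;	/* 0 means "no media loaded" */
	int		acr, acw, ace;	/* granted read/write/exclusive counts */
};

static void
access_model(struct provider_model *pp, int dcr, int dcw, int dce)
{
	pp->acr += dcr;
	pp->acw += dcw;
	pp->ace += dce;
	if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
		assert(pp->sectorsize > 0 &&
		    "provider lacks sectorsize while open");
}

int
main(void)
{
	/* A CD drive with no disc inserted: sectorsize set, mediasize 0. */
	struct provider_model cd0 = {
		.name = "cd0", .sectorsize = 2048, .mediasize = 0
	};

	access_model(&cd0, 1, 0, 0);	/* medialess open: OK, tray ioctls work */
	printf("%s open with %u-byte sectors, mediasize %jd\n",
	    cd0.name, cd0.sectorsize, (intmax_t)cd0.mediasize);
	access_model(&cd0, -1, 0, 0);	/* close */
	return (0);
}

The 2048-byte default matches the standard CD-ROM logical block size, so an empty drive can satisfy the new invariant and still accept open/close-tray style operations, as the geom/notes addition describes.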