From owner-p4-projects Sat Jun 22 16:22:29 2002
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id DFE1437B403; Sat, 22 Jun 2002 16:21:49 -0700 (PDT)
Delivered-To: perforce@freebsd.org
Received: from freefall.freebsd.org (freefall.FreeBSD.org [216.136.204.21])
	by hub.freebsd.org (Postfix) with ESMTP id 8E61D37B404
	for ; Sat, 22 Jun 2002 16:21:47 -0700 (PDT)
Received: (from perforce@localhost)
	by freefall.freebsd.org (8.11.6/8.11.6) id g5MNLlV69192
	for perforce@freebsd.org; Sat, 22 Jun 2002 16:21:47 -0700 (PDT)
	(envelope-from julian@freebsd.org)
Date: Sat, 22 Jun 2002 16:21:47 -0700 (PDT)
Message-Id: <200206222321.g5MNLlV69192@freefall.freebsd.org>
X-Authentication-Warning: freefall.freebsd.org: perforce set sender to julian@freebsd.org using -f
From: Julian Elischer
Subject: PERFORCE change 13290 for review
To: Perforce Change Reviews
Sender: owner-p4-projects@FreeBSD.ORG
Precedence: bulk
List-ID:
List-Archive: (Web Archive)
List-Help: (List Instructions)
List-Subscribe:
List-Unsubscribe:
X-Loop: FreeBSD.ORG

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=13290

Change 13290 by julian@julian_ref on 2002/06/22 16:21:15

	Checking AC KBLRU in BLURP GAK stuff.
	- Bill the cat

Affected files ...

... //depot/projects/kse/sys/i386/i386/trap.c#53 edit
... //depot/projects/kse/sys/kern/init_main.c#43 edit
... //depot/projects/kse/sys/kern/kern_fork.c#72 edit
... //depot/projects/kse/sys/kern/kern_idle.c#14 edit
... //depot/projects/kse/sys/kern/kern_intr.c#25 edit
... //depot/projects/kse/sys/kern/kern_mutex.c#28 edit
... //depot/projects/kse/sys/kern/kern_proc.c#72 edit
... //depot/projects/kse/sys/kern/kern_switch.c#53 edit
... //depot/projects/kse/sys/kern/kern_synch.c#68 edit
... //depot/projects/kse/sys/kern/kern_thread.c#75 edit
... //depot/projects/kse/sys/kern/subr_trap.c#70 edit
... //depot/projects/kse/sys/sys/proc.h#112 edit
... //depot/projects/kse/sys/vm/uma_int.h#5 edit

Differences ...

==== //depot/projects/kse/sys/i386/i386/trap.c#53 (text+ko) ====

@@ -951,6 +951,8 @@
 		mtx_unlock(&Giant);
 	}
 #endif
+	KASSERT((td->td_kse != NULL), ("syscall: kse/thread UNLINKED"));
+	KASSERT((td->td_kse->ke_thread == td), ("syscall:kse/thread mismatch"));
 	sticks = td->td_kse->ke_sticks;
 	td->td_frame = &frame;

==== //depot/projects/kse/sys/kern/init_main.c#43 (text+ko) ====

@@ -349,6 +349,8 @@
 	kg->kg_idle_kses--;
 	p->p_peers = 0;
 	p->p_leader = p;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
+
 	bcopy("swapper", p->p_comm, sizeof ("swapper"));

==== //depot/projects/kse/sys/kern/kern_fork.c#72 (text+ko) ====

@@ -508,12 +508,13 @@
 	proc_linkup(p2, kg2, ke2, td2);

 	/* Set up the thread as an active thread (as if runnable). */
+	TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
+	kg2->kg_idle_kses--;
+	ke2->ke_state = KES_UNQUEUED;
 	ke2->ke_thread = td2;
 	td2->td_kse = ke2;
-	TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
-	ke2->ke_state = KES_UNQUEUED;
-	kg2->kg_idle_kses--;
 	td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
+KASSERT((ke2->ke_kgrlist.tqe_next != ke2), ("linked to self!"));

 	/* note.. XXXKSE no pcb or u-area yet */

@@ -834,6 +835,7 @@
 	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
 	p->p_state = PRS_NORMAL;
 	td->td_state = TDS_RUNNING; /* Already done in switch() on 386. */
+	td->td_kse->ke_state = KES_RUNNING;
 	/*
 	 * Finish setting up thread glue. We need to initialize
 	 * the thread into a td_critnest=1 state. Some platforms
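
The kern_fork.c hunk above reorders KSE setup so the new kse leaves the
ksegrp's idle queue (and is marked KES_UNQUEUED) before it is cross-linked
with the thread. A minimal compilable sketch of that detach-then-bind
ordering follows; the structs are simplified stand-ins, not the kernel's
real struct kse and struct thread:

/*
 * Sketch only: detach first, bind second.  Pull the kse off the idle
 * queue while its list links are still consistent, and only then
 * cross-link it with the thread.
 */
#include <sys/queue.h>
#include <assert.h>

struct kse;

struct thread {
	struct kse	*td_kse;
};

struct kse {
	TAILQ_ENTRY(kse) ke_kgrlist;	/* idle-queue linkage */
	struct thread	*ke_thread;
};

TAILQ_HEAD(kse_queue, kse);

static void
bind_idle_kse(struct kse_queue *iq, int *idle_kses,
    struct kse *ke, struct thread *td)
{
	TAILQ_REMOVE(iq, ke, ke_kgrlist);
	(*idle_kses)--;
	ke->ke_thread = td;
	td->td_kse = ke;
	/* The change's "linked to self!" check, in assert() form. */
	assert(ke->ke_kgrlist.tqe_next != ke);
}

Doing the queue surgery before the cross-linking means an assertion that
fires mid-sequence sees a kse that is either fully idle or fully bound,
never half of each.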
==== //depot/projects/kse/sys/kern/kern_idle.c#14 (text+ko) ====

@@ -40,6 +40,7 @@
 	struct pcpu *pc;
 #endif
 	struct proc *p;
+	struct thread *td;
 	int error;

 #ifdef SMP
@@ -60,7 +61,10 @@
 			panic("idle_setup: kthread_create error %d\n", error);

 		p->p_flag |= P_NOLOAD;
-		FIRST_THREAD_IN_PROC(p)->td_state = TDS_RUNQ; /* XXXKSE */
+		td = FIRST_THREAD_IN_PROC(p);
+		td->td_state = TDS_RUNQ;
+		td->td_kse->ke_state = KES_ONRUNQ;
+		td->td_kse->ke_flags |= KEF_IDLEKSE;
 #ifdef SMP
 	}
 #endif
@@ -75,7 +79,13 @@
 #ifdef DIAGNOSTIC
 	int count;
 #endif
+	struct thread *td;
+	struct proc *p;

+	td = curthread;
+	p = td->td_proc;
+	td->td_state = TDS_RUNNING;
+	td->td_kse->ke_state = KES_RUNNING;
 	for (;;) {
 		mtx_assert(&Giant, MA_NOTOWNED);

@@ -103,9 +113,9 @@
 		}

 		mtx_lock_spin(&sched_lock);
-		curthread->td_proc->p_stats->p_ru.ru_nvcsw++;
-		curthread->td_state = TDS_RUNQ; /* Pretend we are on the runq */
+		p->p_stats->p_ru.ru_nvcsw++;
 		mi_switch();
+		td->td_kse->ke_state = KES_RUNNING;
 		mtx_unlock_spin(&sched_lock);
 	}
 }

==== //depot/projects/kse/sys/kern/kern_intr.c#25 (text+ko) ====

@@ -390,10 +390,12 @@
 			CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
 			setrunqueue(td);
 			if (do_switch &&
-			    (curthread->td_critnest == 1) &&
-			    (curthread->td_state == TDS_RUNNING)) {
+			    (curthread->td_critnest == 1)/* &&
+			    (curthread->td_state == TDS_RUNNING) XXXKSE*/) {
+#if 0 /* not needed in KSE */
 				if (curthread != PCPU_GET(idlethread))
 					setrunqueue(curthread);
+#endif
 				curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
 				mi_switch();
 			} else {

==== //depot/projects/kse/sys/kern/kern_mutex.c#28 (text+ko) ====

==== //depot/projects/kse/sys/kern/kern_proc.c#72 (text+ko) ====

@@ -120,8 +120,10 @@
 {
 	struct proc *p = kg->kg_proc;

+KASSERT((ke->ke_state != KES_ONRUNQ), ("linking suspect kse on run queue"));
 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
 	kg->kg_kses++;
+KASSERT((ke->ke_state != KES_IDLE), ("already on idle queue"));
 	ke->ke_state = KES_IDLE;
 	TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
 	kg->kg_idle_kses++;
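
The flush-left KASSERTs scattered through this change all probe for one
corruption: a TAILQ element whose tqe_next points back at itself. That is
exactly what a double head-insert produces, and it makes every later
traversal spin forever. A small userland demonstration, with a
hypothetical struct rather than kernel code:

/*
 * Sketch only: why "linked to self!" is the symptom being asserted for.
 */
#include <sys/queue.h>
#include <assert.h>

struct item {
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

int
main(void)
{
	struct itemq q = TAILQ_HEAD_INITIALIZER(q);
	struct item a;

	TAILQ_INSERT_HEAD(&q, &a, link);
	assert(a.link.tqe_next != &a);	/* ok: single insert */

	/* The bug being hunted: the same element inserted twice. */
	TAILQ_INSERT_HEAD(&q, &a, link);
	assert(a.link.tqe_next != &a);	/* fires: a now points at itself */
	return (0);
}

The second insert sets a.link.tqe_next to the current head, which is
already &a, hence the self-link the assertions catch at insert time
rather than at the eventual hang.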
==== //depot/projects/kse/sys/kern/kern_switch.c#53 (text+ko) ====

@@ -126,18 +126,22 @@

 	if ((ke = runq_choose(&runq))) {
 		td = ke->ke_thread;
+		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
+		kg = ke->ke_ksegrp;
 		if (td->td_flags & TDF_UNBOUND) {
-			kg = ke->ke_ksegrp;
 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
 			if (kg->kg_last_assigned == td)
 				kg->kg_last_assigned = NULL;
 		}
+		kg->kg_runnable--;
 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
 		    td, td->td_priority);
 	} else {
 		/* Pretend the idle thread was on the run queue. */
 		td = PCPU_GET(idlethread);
+		/* Simulate that it was on the run queue */
 		td->td_state = TDS_RUNQ;
+		td->td_kse->ke_state = KES_UNQUEUED;
 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
 	}
 	return (td);
@@ -156,6 +160,7 @@

 	kg = ke->ke_ksegrp;

+KASSERT((ke->ke_state != KES_ONRUNQ), ("kse_reassigning non-free kse"));
 	/*
 	 * Find the first unassigned thread
 	 * If there is a 'last assigned' then see what's next.
@@ -177,10 +182,13 @@
 		runq_add(&runq, ke);
 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
 	} else {
+		KASSERT((ke->ke_state != KES_IDLE), ("kse already idle"));
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 		ke->ke_state = KES_IDLE;
 		TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
 		kg->kg_idle_kses++;
 		CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	}
 }

@@ -234,6 +242,7 @@
 	struct ksegrp *kg;
 	struct kse *ke;

+	mtx_assert(&sched_lock, MA_OWNED);
 	if (td->td_state != TDS_RUNQ) {
 		panic("remrunqueue: Bad state on run queue");
 		/* NOTREACHED */
@@ -289,14 +298,21 @@
 			kg->kg_last_assigned =
 			    TAILQ_PREV(td, threadlist_head, td_runq);
 		}
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
+		KASSERT((ke->ke_state != KES_IDLE),
+		    ("kse already idle"));
 		ke->ke_state = KES_IDLE;
+KASSERT((TAILQ_FIRST(&kg->kg_iq) != ke), ("really bad screwup"));
 		TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
 		kg->kg_idle_kses++;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self2!"));
 		}
 	}
 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
 }

+#if 1 /* use the first version */
+
 void
 setrunqueue(struct thread *td)
 {
 	struct kse *ke;
 	struct ksegrp *kg;
 	struct thread *td2;

 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
+	mtx_assert(&sched_lock, MA_OWNED);
 	KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
 	td->td_state = TDS_RUNQ;
 	kg = td->td_ksegrp;
 	kg->kg_runnable++;
 	if ((td->td_flags & TDF_UNBOUND) == 0) {
+		KASSERT((td->td_kse != NULL),
+		    ("queueing BAD thread to run queue"));
 		/*
 		 * Common path optimisation: Only one of everything
 		 * and the KSE is always already attached.
@@ -318,7 +337,98 @@
 		runq_add(&runq, td->td_kse);
 		return;
 	}
+	/*
+	 * Ok, so we are threading with this thread.
+	 * quickly disassociate the KSE if we have one..
+	 */
+	if ((ke = td->td_kse) == NULL) {
+		/*
+		 * see if we will need a KSE and if there is one..
+		 * First look for a free one, before getting desperate.
+		 */
+		if (kg->kg_idle_kses) {
+			/*
+			 * There is a free one so it's ours for the asking..
+			 */
+			ke = TAILQ_FIRST(&kg->kg_iq);
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
+			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
+			ke->ke_state = KES_UNQUEUED;
+			kg->kg_idle_kses--;
+			ke->ke_thread = td;
+			td->td_kse = ke;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
+		} else if ((kg->kg_last_assigned) &&
+		    (kg->kg_last_assigned->td_priority > td->td_priority)) {
+			/*
+			 * None free, but there is one we can commandeer.
+			 */
+			td2 = kg->kg_last_assigned;
+			kg->kg_last_assigned =
+			    TAILQ_PREV(td2, threadlist_head, td_runq);
+			ke = td2->td_kse;
+			runq_remove(&runq, ke);
+			ke->ke_thread = td;
+			td2->td_kse = NULL;
+			td->td_kse = ke;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
+		}
+	}
+	/*
+	 * Add the thread to the ksegrp's run queue at
+	 * the appropriate place.
+	 */
+	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
+		if (td2->td_priority > td->td_priority) {
+			TAILQ_INSERT_BEFORE(td2, td, td_runq);
+			break;
+		}
+	}
+	if (td2 == NULL) {
+		/* We ran off the end of the TAILQ or it was empty. */
+		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
+	}
+
+	/*
+	 * If we have a ke to use, then put it on the run queue and
+	 * If needed, readjust the last_assigned pointer.
+	 */
+	if (ke) {
+		ke->ke_thread = td;
+		td->td_kse = ke;
+		if (kg->kg_last_assigned ==
+		    TAILQ_PREV(td, threadlist_head, td_runq)) {
+			kg->kg_last_assigned = td;
+		}
+		runq_add(&runq, ke);
+	}
+}
+
+#else
+
+void
+setrunqueue(struct thread *td)
+{
+	struct kse *ke;
+	struct ksegrp *kg;
+	struct thread *td2;
+
+	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
+	KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
+	td->td_state = TDS_RUNQ;
+	kg = td->td_ksegrp;
+	kg->kg_runnable++;
+	if ((td->td_flags & TDF_UNBOUND) == 0) {
+		/*
+		 * Common path optimisation: Only one of everything
+		 * and the KSE is always already attached.
+		 * Totally ignore the ksegrp run queue.
+		 */
+		runq_add(&runq, td->td_kse);
+		return;
+	}

 	/*
 	 * First add the thread to the ksegrp's run queue at
 	 * the appropriate place.
 	 */
@@ -379,12 +489,15 @@
 	 * assigned" pointer set to us as well.
 	 */
 	ke = TAILQ_FIRST(&kg->kg_iq);
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
 	ke->ke_state = KES_UNQUEUED;
 	kg->kg_idle_kses--;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	ke->ke_thread = td;
 	td->td_kse = ke;
 	runq_add(&runq, ke);
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	if (TAILQ_NEXT(td, td_runq) == NULL) {
 		kg->kg_last_assigned = td;
 	}
@@ -400,14 +513,17 @@
 	 */
 	td2 = kg->kg_last_assigned;
 	ke = td2->td_kse;
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	kg->kg_last_assigned =
 	    TAILQ_PREV(td2, threadlist_head, td_runq);
 	td2->td_kse = NULL;
 	td->td_kse = ke;
 	ke->ke_thread = td;
 	runq_readjust(&runq, ke);
+KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 	}
 }
+#endif

 /************************************************************************
  * Critical section marker functions					*
@@ -586,6 +702,9 @@
 	KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
 	CTR3(KTR_RUNQ, "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
+KASSERT(ke->ke_procq.tqe_prev != NULL, ("no prev"));
+if (ke->ke_procq.tqe_next)
+	KASSERT(ke->ke_procq.tqe_next->ke_procq.tqe_prev != NULL, ("no next"));
 	TAILQ_REMOVE(rqh, ke, ke_procq);
 	ke->ke_ksegrp->kg_runq_kses--;
 	if (TAILQ_EMPTY(rqh)) {
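
The heart of the new setrunqueue() above is the priority-ordered insert
into the ksegrp run queue: walk kg_runq until a thread with a numerically
larger (worse) priority is found, insert before it, otherwise append at
the tail. Reduced to a compilable sketch with stand-in types:

/*
 * Sketch only: the bare priority-ordered TAILQ insert idiom.
 * Lower td_priority is better, as in the kernel.
 */
#include <sys/queue.h>

struct sthread {
	TAILQ_ENTRY(sthread) td_runq;
	int td_priority;
};
TAILQ_HEAD(runq_head, sthread);

static void
runq_insert_by_pri(struct runq_head *rq, struct sthread *td)
{
	struct sthread *td2;

	/* Walk until a worse (numerically larger) priority is found. */
	TAILQ_FOREACH(td2, rq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			return;
		}
	}
	/* We ran off the end of the TAILQ, or it was empty. */
	TAILQ_INSERT_TAIL(rq, td, td_runq);
}

Using > rather than >= keeps insertion FIFO among equal priorities; the
kernel loop gets the same effect by relying on td2 being NULL after
TAILQ_FOREACH falls off the end, which triggers the tail append.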
==== //depot/projects/kse/sys/kern/kern_synch.c#68 (text+ko) ====

@@ -420,6 +420,9 @@
  * entered before msleep returns. If priority includes the PDROP
  * flag the mutex is not entered before returning.
  */
+
+struct thread *TD1;
+
 int
 msleep(ident, mtx, priority, wmesg, timo)
 	void *ident;
@@ -437,6 +440,8 @@
 	if (KTRPOINT(td, KTR_CSW))
 		ktrcsw(1, 0);
 #endif
+	KASSERT((td->td_kse != NULL), ("msleep: NULL KSE?"));
+	KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse state?"));
 	WITNESS_SLEEP(0, &mtx->mtx_object);
 	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
 	    ("sleeping without a mutex"));
@@ -453,6 +458,7 @@
 		   and not the exiting thread. */
 		if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
 			return (EINTR);
+		TD1 = NULL;
 		if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
 			/*
 			 * If we have no queued work to do, then
@@ -461,14 +467,22 @@
 			 * queue it.
 			 */
 			if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
+				TD1 = td; /* Don't recurse here! */
 				mtx_lock_spin(&sched_lock);
+	KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateX?"));
 				td->td_flags |= TDF_INMSLEEP;
 				thread_schedule_upcall(td, td->td_kse);
 				td->td_flags &= ~TDF_INMSLEEP;
+	KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateY?"));
 				mtx_unlock_spin(&sched_lock);
 			}
 		}
+		KASSERT((td->td_kse != NULL), ("msleep: NULL KSE2?"));
+		KASSERT((td->td_kse->ke_state == KES_RUNNING),
+		    ("msleep: kse state2?"));
+		KASSERT((td->td_kse->ke_thread == td),
+		    ("msleep: kse/thread mismatch?"));
 	}
 	mtx_lock_spin(&sched_lock);
 	if (cold || panicstr) {
@@ -767,7 +781,7 @@
 {
 	struct bintime new_switchtime;
 	struct thread *td = curthread;	/* XXX */
-	register struct proc *p = td->td_proc;	/* XXX */
+	struct proc *p = td->td_proc;	/* XXX */
 	struct kse *ke = td->td_kse;
 #if 0
 	register struct rlimit *rlim;
@@ -775,6 +789,7 @@
 	u_int sched_nest;

 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
+	KASSERT((ke->ke_state == KES_RUNNING), ("mi_switch: kse state?"));
 #ifdef INVARIANTS
 	if (td->td_state != TDS_MTX &&
 	    td->td_state != TDS_RUNQ &&
@@ -846,10 +861,12 @@
 	 * At the last moment: if this KSE is not on the run queue,
 	 * it needs to be freed correctly and the thread treated accordingly.
 	 */
-	if (td->td_state == TDS_RUNNING) {
+	if ((td->td_state == TDS_RUNNING) &&
+	    ((ke->ke_flags & KEF_IDLEKSE) == 0)) {
 		/* Put us back on the run queue (kse and all). */
 		setrunqueue(td);
-	} else if ((td->td_flags & TDF_UNBOUND) && (td->td_state != TDS_RUNQ)) {
+	} else if ((td->td_flags & TDF_UNBOUND) &&
+	    (td->td_state != TDS_RUNQ)) { /* in case of old code */
 		/*
 		 * We will not be on the run queue.
 		 * Someone else can use the KSE if they need it.
@@ -859,6 +876,7 @@
 	}
 	cpu_switch();
 	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+	td->td_kse->ke_state = KES_RUNNING;
 	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,

==== //depot/projects/kse/sys/kern/kern_thread.c#75 (text+ko) ====

@@ -313,6 +313,8 @@

 	/* Reassign this thread's KSE. */
 	if (ke != NULL) {
+KASSERT((ke->ke_state != KES_ONRUNQ), ("zapping kse on run queue"));
+KASSERT((ke->ke_thread->td_state != TDS_RUNQ), ("zapping thread on run queue"));
 		ke->ke_thread = NULL;
 		td->td_kse = NULL;
 		ke->ke_state = KES_UNQUEUED;
@@ -401,7 +403,6 @@
 	thread_link(td2, ke->ke_ksegrp);
 	cpu_set_upcall(td2, ke->ke_pcb);
 	td2->td_ucred = crhold(td->td_ucred);
-	td2->td_kse = NULL; /* Back as it was. */
 	td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
 	td2->td_priority = td->td_priority;
 	setrunqueue(td2);

==== //depot/projects/kse/sys/kern/subr_trap.c#70 (text+ko) ====

@@ -341,7 +341,6 @@
 	if (flags & KEF_NEEDRESCHED) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
-		setrunqueue(td);
 		p->p_stats->p_ru.ru_nivcsw++;
 		mi_switch();
 		mtx_unlock_spin(&sched_lock);

==== //depot/projects/kse/sys/sys/proc.h#112 (text+ko) ====

@@ -380,6 +380,7 @@
 /* flags kept in ke_flags */
 #define	KEF_ONRUNQ	0x00001	/* This KE is on a run queue */
 #define	KEF_OWEUPC	0x00002	/* Owe process an addupc() call at next ast. */
+#define	KEF_IDLEKSE	0x00004	/* A 'Per CPU idle process'.. has one thread */
 #define	KEF_ASTPENDING	0x00400	/* KSE has a pending ast. */
 #define	KEF_NEEDRESCHED	0x00800	/* Process needs to yield. */
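
TDF_INMSLEEP in the msleep() hunk is a flag bit of the same single-bit
shape as the KEF_* bits defined above, used purely as a re-entry guard:
it is set around thread_schedule_upcall() so that if the upcall path
winds back into msleep(), the nested call sees the bit and skips
scheduling a second upcall. A sketch of the idiom with made-up names,
not the kernel's types:

/*
 * Sketch only: a flag bit as a re-entry guard.
 */
#define	XTDF_INUPCALL	0x01	/* hypothetical stand-in for TDF_INMSLEEP */

struct xthread {
	int	td_flags;
};

static void
queue_upcall(struct xthread *td)
{
	(void)td;	/* stub: the real code sets up an upcall thread */
}

static void
maybe_queue_upcall(struct xthread *td)
{
	/* A nested entry sees the bit and bails instead of recursing. */
	if (td->td_flags & XTDF_INUPCALL)
		return;
	td->td_flags |= XTDF_INUPCALL;
	queue_upcall(td);
	td->td_flags &= ~XTDF_INUPCALL;
}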

==== //depot/projects/kse/sys/vm/uma_int.h#5 (text+ko) ====

@@ -109,7 +109,7 @@
 #define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
 #define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits PAGE_MASK */

-#define UMA_BOOT_PAGES		15	/* Number of pages allocated for startup */
+#define UMA_BOOT_PAGES		30	/* Number of pages allocated for startup */

 #define UMA_WORKING_TIME	20	/* Seconds worth of items to keep */

To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe p4-projects" in the body of the message