From owner-p4-projects@FreeBSD.ORG Sun Oct 21 11:56:14 2007 Return-Path: Delivered-To: p4-projects@freebsd.org Received: by hub.freebsd.org (Postfix, from userid 32767) id 3F09916A468; Sun, 21 Oct 2007 11:56:14 +0000 (UTC) Delivered-To: perforce@FreeBSD.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34]) by hub.freebsd.org (Postfix) with ESMTP id C2EEE16A418 for ; Sun, 21 Oct 2007 11:56:13 +0000 (UTC) (envelope-from zec@FreeBSD.org) Received: from repoman.freebsd.org (repoman.freebsd.org [IPv6:2001:4f8:fff6::29]) by mx1.freebsd.org (Postfix) with ESMTP id A619813C49D for ; Sun, 21 Oct 2007 11:56:13 +0000 (UTC) (envelope-from zec@FreeBSD.org) Received: from repoman.freebsd.org (localhost [127.0.0.1]) by repoman.freebsd.org (8.14.1/8.14.1) with ESMTP id l9LBuDod055955 for ; Sun, 21 Oct 2007 11:56:13 GMT (envelope-from zec@FreeBSD.org) Received: (from perforce@localhost) by repoman.freebsd.org (8.14.1/8.14.1/Submit) id l9LBuDMd055952 for perforce@freebsd.org; Sun, 21 Oct 2007 11:56:13 GMT (envelope-from zec@FreeBSD.org) Date: Sun, 21 Oct 2007 11:56:13 GMT Message-Id: <200710211156.l9LBuDMd055952@repoman.freebsd.org> X-Authentication-Warning: repoman.freebsd.org: perforce set sender to zec@FreeBSD.org using -f From: Marko Zec To: Perforce Change Reviews Cc: Subject: PERFORCE change 127888 for review X-BeenThere: p4-projects@freebsd.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: p4 projects tree changes List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Sun, 21 Oct 2007 11:56:14 -0000 http://perforce.freebsd.org/chv.cgi?CH=127888 Change 127888 by zec@zec_tca51 on 2007/10/21 11:55:17 Attempt to fix per-procgroup system load average accounting and computation, for options VIMAGE + options SCHED_ULE + options SMP builds. So far this seems to work OK on a hyperthreaded CPU with a single core (a Pentium 4), but needs yet to be tested on true multicore / multiprocessor systems. 
	NB per-procgroup system load avg accounting works OK
	with SCHED_4BSD on both UP and SMP builds.

Affected files ...

.. //depot/projects/vimage/src/sys/kern/sched_ule.c#17 edit
.. //depot/projects/vimage/src/sys/sys/vimage.h#46 edit

Differences ...

==== //depot/projects/vimage/src/sys/kern/sched_ule.c#17 (text+ko) ====

@@ -483,12 +483,15 @@
 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) {
 #ifdef SMP
 		tdq->tdq_group->tdg_load++;
-#else
+#ifdef VIMAGE
+		V_tdq_sysload[TDG_ID(tdq->tdq_group)]++;
+#endif
+#else /* !SMP */
 		tdq->tdq_sysload++;
-#endif
 #ifdef VIMAGE
-	V_tdq_load[curcpu]++;
+		V_tdq_sysload[0]++;
 #endif
+#endif /* SMP */
 	}
 }
@@ -512,12 +515,15 @@
 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) {
 #ifdef SMP
 		tdq->tdq_group->tdg_load--;
-#else
+#ifdef VIMAGE
+		V_tdq_sysload[TDG_ID(tdq->tdq_group)]--;
+#endif
+#else /* !SMP */
 		tdq->tdq_sysload--;
-#endif
 #ifdef VIMAGE
-	V_tdq_load[curcpu]--;
+		V_tdq_sysload[0]--;
 #endif
+#endif /* SMP */
 	}
 	KASSERT(tdq->tdq_load != 0,
 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
@@ -2613,26 +2619,34 @@
 #ifndef VIMAGE
 		total += TDQ_GROUP(i)->tdg_load;
 #else
-		total += V_tdg_load[i];
+		total += V_tdq_sysload[i];
 #endif
 	return (total);
-#else
+#else /* !SMP */
 #ifndef VIMAGE
 	return (TDQ_SELF()->tdq_sysload);
 #else
-	return (V_tdq_load[0]);
+	return (V_tdq_sysload[0]);
 #endif
-#endif
+#endif /* SMP */
 }

 #ifdef VIMAGE
 void
 sched_load_reassign(struct vprocg *old, struct vprocg *new)
 {
+#ifdef SMP
+	int tdg_id;
+
 	critical_enter();
-	old->_tdq_load[curcpu]--;
-	new->_tdq_load[curcpu]++;
+	tdg_id = TDG_ID(tdq_cpu[curcpu].tdq_group);
+	old->_tdq_sysload[tdg_id]--;
+	new->_tdq_sysload[tdg_id]++;
 	critical_exit();
+#else
+	old->_tdq_sysload[0]--;
+	new->_tdq_sysload[0]++;
+#endif
 }
 #endif

==== //depot/projects/vimage/src/sys/sys/vimage.h#46 (text+ko) ====

@@ -324,7 +324,7 @@
 #define V_morphing_symlinks VPROCG(morphing_symlinks)
 #define V_averunnable VPROCG(averunnable)
 #define V_sched_tdcnt VPROCG(sched_tdcnt)
-#define V_tdq_load VPROCG(tdq_load)
+#define V_tdq_sysload VPROCG(tdq_sysload)

 #define V_acc_statcalls VCPU(acc_statcalls)
 #define V_avg1_fixp VCPU(avg1_fixp)
@@ -407,7 +407,7 @@
 	struct loadavg _averunnable;	/* from kern/kern_synch.c */
 	int _sched_tdcnt;		/* from kern/sched_4bsd.c */
-	int _tdq_load[32];		/* XXX MAXCPUS from kern/sched_ule.c (SMP) */
+	int _tdq_sysload[32];		/* XXX MAXCPUS from kern/sched_ule.c (SMP) */
 #if 0
 	u_int proc_limit;		/* max. number of processes */