Date:      Tue, 13 Mar 2012 08:18:54 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r232917 - head/sys/kern
Message-ID:  <201203130818.q2D8IsZI027685@svn.freebsd.org>

Author: mav
Date: Tue Mar 13 08:18:54 2012
New Revision: 232917
URL: http://svn.freebsd.org/changeset/base/232917

Log:
  Rewrite the thread CPU usage percentage math so that it no longer depends
  on periodic calls at HZ rate via the sched_tick() calls from hardclock().
  
  Potentially this can be used to improve precision, but for now it is just
  one less reason to call hardclock() for every HZ tick on every active CPU.
  SCHED_4BSD never used sched_tick(), but keep it in place for now, as at
  least SCHED_FBFS, which exists in out-of-tree patches, depends on it.
  
  MFC after:	1 month
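
[Editor's note: the new sched_pctcpu_update() in the diff below keeps a
sliding window of roughly SCHED_TICK_TARG ticks and charges elapsed
wall-clock ticks to a thread only on events (context switch, wakeup, fork,
stat clock), scaled by SCHED_TICK_SHIFT.  The userland sketch that follows
simulates that accounting for a thread with a 30% duty cycle.  It is an
illustration only, not kernel code: the constants assume the stock
sched_ule.c values (SCHED_TICK_SECS = 10, SCHED_TICK_SHIFT = 10) with an
assumed hz of 1000, and td_sched/pctcpu_update are local names mirroring
the diff.]

	#include <stdio.h>

	#define	HZ		1000			/* assumed tick rate */
	#define	TICK_SECS	10			/* sampling window, seconds */
	#define	TICK_TARG	(HZ * TICK_SECS)	/* target window, ticks */
	#define	TICK_MAX	(TICK_TARG + HZ)	/* rescale past this point */
	#define	TICK_SHIFT	10			/* fixed-point shift */

	struct td_sched {
		int	ts_ticks;	/* scaled count of ticks spent running */
		int	ts_ltick;	/* last tick this thread was updated on */
		int	ts_ftick;	/* first tick of the sampling window */
	};

	/* Mirrors the new sched_pctcpu_update(): event-driven, not hardclock-driven. */
	static void
	pctcpu_update(struct td_sched *ts, int t, int run)
	{

		if (t - ts->ts_ltick >= TICK_TARG) {
			/* Untouched for a full window: old history is worthless. */
			ts->ts_ticks = 0;
			ts->ts_ftick = t - TICK_TARG;
		} else if (t - ts->ts_ftick >= TICK_MAX) {
			/*
			 * Window overflowed: keep only the fraction of the old
			 * history that still overlaps the new TICK_TARG window.
			 */
			ts->ts_ticks = (ts->ts_ticks /
			    (ts->ts_ltick - ts->ts_ftick)) *
			    (ts->ts_ltick - (t - TICK_TARG));
			ts->ts_ftick = t - TICK_TARG;
		}
		if (run)
			ts->ts_ticks += (t - ts->ts_ltick) << TICK_SHIFT;
		ts->ts_ltick = t;
	}

	int
	main(void)
	{
		struct td_sched ts = { 0, 0, -TICK_TARG };
		int rtick, t = 0;

		/* Alternate 300 ms running / 700 ms sleeping for one minute. */
		for (int i = 0; i < 60; i++) {
			t += 300;
			pctcpu_update(&ts, t, 1);	/* switched out after running */
			t += 700;
			pctcpu_update(&ts, t, 0);	/* woke up after sleeping */
		}
		/* Descale roughly as sched_pctcpu() does, sans FSCALE fixed point. */
		rtick = (ts.ts_ticks >> TICK_SHIFT) / TICK_SECS;
		printf("measured ~%d%% CPU for a 30%% duty cycle\n",
		    100 * rtick / HZ);
		return (0);
	}

The window rescaling biases the estimate slightly below the true duty
cycle (the sketch settles near 27% for a 30% load); the real
sched_pctcpu() shares that bias, since it divides by SCHED_TICK_SECS
regardless of the actual window length, in exchange for cheap,
event-driven updates.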

Modified:
  head/sys/kern/sched_ule.c

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c	Tue Mar 13 06:50:56 2012	(r232916)
+++ head/sys/kern/sched_ule.c	Tue Mar 13 08:18:54 2012	(r232917)
@@ -99,7 +99,6 @@ struct td_sched {	
 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
 	u_int		ts_runtime;	/* Number of ticks we were running */
 	int		ts_ltick;	/* Last tick that we were running on */
-	int		ts_incrtick;	/* Last tick that we incremented on */
 	int		ts_ftick;	/* First tick that we were running on */
 	int		ts_ticks;	/* Tick count */
 #ifdef KTR
@@ -291,7 +290,7 @@ static void sched_thread_priority(struct
 static int sched_interact_score(struct thread *);
 static void sched_interact_update(struct thread *);
 static void sched_interact_fork(struct thread *);
-static void sched_pctcpu_update(struct td_sched *);
+static void sched_pctcpu_update(struct td_sched *, int);
 
 /* Operations on per processor queues */
 static struct thread *tdq_choose(struct tdq *);
@@ -671,7 +670,7 @@ cpu_search(const struct cpu_group *cg, s
 			}
 		}
 		if (match & CPU_SEARCH_HIGHEST)
-			if (hgroup.cs_load != -1 &&
+			if (hgroup.cs_load >= 0 &&
 			    (load > hload ||
 			     (load == hload && hgroup.cs_load > high->cs_load))) {
 				hload = load;
@@ -1590,24 +1589,21 @@ sched_rr_interval(void)
  * mechanism since it happens with less regular and frequent events.
  */
 static void
-sched_pctcpu_update(struct td_sched *ts)
+sched_pctcpu_update(struct td_sched *ts, int run)
 {
+	int t = ticks;
 
-	if (ts->ts_ticks == 0)
-		return;
-	if (ticks - (hz / 10) < ts->ts_ltick &&
-	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
-		return;
-	/*
-	 * Adjust counters and watermark for pctcpu calc.
-	 */
-	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
-		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
-			    SCHED_TICK_TARG;
-	else
+	if (t - ts->ts_ltick >= SCHED_TICK_TARG) {
 		ts->ts_ticks = 0;
-	ts->ts_ltick = ticks;
-	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
+		ts->ts_ftick = t - SCHED_TICK_TARG;
+	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
+		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
+		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
+		ts->ts_ftick = t - SCHED_TICK_TARG;
+	}
+	if (run)
+		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
+	ts->ts_ltick = t;
 }
 
 /*
@@ -1826,6 +1822,7 @@ sched_switch(struct thread *td, struct t
 	tdq = TDQ_CPU(cpuid);
 	ts = td->td_sched;
 	mtx = td->td_lock;
+	sched_pctcpu_update(ts, 1);
 	ts->ts_rltick = ticks;
 	td->td_lastcpu = td->td_oncpu;
 	td->td_oncpu = NOCPU;
@@ -1880,6 +1877,7 @@ sched_switch(struct thread *td, struct t
 #endif
 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
+		sched_pctcpu_update(newtd->td_sched, 0);
 
 #ifdef KDTRACE_HOOKS
 		/*
@@ -1974,12 +1972,9 @@ sched_wakeup(struct thread *td)
 	slptick = td->td_slptick;
 	td->td_slptick = 0;
 	if (slptick && slptick != ticks) {
-		u_int hzticks;
-
-		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
-		ts->ts_slptime += hzticks;
+		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
 		sched_interact_update(td);
-		sched_pctcpu_update(ts);
+		sched_pctcpu_update(ts, 0);
 	}
 	/* Reset the slice value after we sleep. */
 	ts->ts_slice = sched_slice;
@@ -1994,6 +1989,7 @@ void
 sched_fork(struct thread *td, struct thread *child)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	sched_pctcpu_update(td->td_sched, 1);
 	sched_fork_thread(td, child);
 	/*
 	 * Penalize the parent and child for forking.
@@ -2029,7 +2025,6 @@ sched_fork_thread(struct thread *td, str
 	 */
 	ts2->ts_ticks = ts->ts_ticks;
 	ts2->ts_ltick = ts->ts_ltick;
-	ts2->ts_incrtick = ts->ts_incrtick;
 	ts2->ts_ftick = ts->ts_ftick;
 	/*
 	 * Do not inherit any borrowed priority from the parent.
@@ -2186,6 +2181,7 @@ sched_clock(struct thread *td)
 			tdq->tdq_ridx = tdq->tdq_idx;
 	}
 	ts = td->td_sched;
+	sched_pctcpu_update(ts, 1);
 	if (td->td_pri_class & PRI_FIFO_BIT)
 		return;
 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
@@ -2210,31 +2206,12 @@ sched_clock(struct thread *td)
 }
 
 /*
- * Called once per hz tick.  Used for cpu utilization information.  This
- * is easier than trying to scale based on stathz.
+ * Called once per hz tick.
  */
 void
 sched_tick(int cnt)
 {
-	struct td_sched *ts;
 
-	ts = curthread->td_sched;
-	/*
-	 * Ticks is updated asynchronously on a single cpu.  Check here to
-	 * avoid incrementing ts_ticks multiple times in a single tick.
-	 */
-	if (ts->ts_incrtick == ticks)
-		return;
-	/* Adjust ticks for pctcpu */
-	ts->ts_ticks += cnt << SCHED_TICK_SHIFT;
-	ts->ts_ltick = ticks;
-	ts->ts_incrtick = ticks;
-	/*
-	 * Update if we've exceeded our desired tick threshold by over one
-	 * second.
-	 */
-	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
-		sched_pctcpu_update(ts);
 }
 
 /*
@@ -2276,7 +2253,6 @@ sched_choose(void)
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	td = tdq_choose(tdq);
 	if (td) {
-		td->td_sched->ts_ltick = ticks;
 		tdq_runq_rem(tdq, td);
 		tdq->tdq_lowpri = td->td_priority;
 		return (td);
@@ -2422,10 +2398,10 @@ sched_pctcpu(struct thread *td)
 		return (0);
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
 	if (ts->ts_ticks) {
 		int rtick;
 
-		sched_pctcpu_update(ts);
 		/* How many rtick per second ? */
 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
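
[Editor's note: to put numbers on this last hunk, assuming hz = 1000 and
the stock SCHED_TICK_SHIFT = 10, SCHED_TICK_SECS = 10: a thread that ran
for 5 of the last 10 seconds accumulates ts_ticks of about
5000 << 10 = 5,120,000, so SCHED_TICK_HZ(ts) = 5,000,
rtick = min(5000 / 10, 1000) = 500, and
pctcpu = (FSCALE * ((FSCALE * 500) / 1000)) >> FSHIFT, i.e. 0.5 in FSCALE
fixed point, or 50% CPU.]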


