Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 18 Jun 2025 02:13:11 GMT
From:      Olivier Certner <olce@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: a11926f2a5f0 - main - runq: API tidy up: 'pri' => 'idx', 'idx' as int, remove runq_remove_idx()
Message-ID:  <202506180213.55I2DBsU024029@gitrepo.freebsd.org>

next in thread | raw e-mail | index | archive | help
The branch main has been updated by olce:

URL: https://cgit.FreeBSD.org/src/commit/?id=a11926f2a5f00b57ebff5bd548c9904b7f6e5800

commit a11926f2a5f00b57ebff5bd548c9904b7f6e5800
Author:     Olivier Certner <olce@FreeBSD.org>
AuthorDate: 2024-02-27 17:44:12 +0000
Commit:     Olivier Certner <olce@FreeBSD.org>
CommitDate: 2025-06-18 02:07:57 +0000

    runq: API tidy up: 'pri' => 'idx', 'idx' as int, remove runq_remove_idx()
    
    Make sure that external and internal users are aware that the runqueue
    API always expects queue indices, and not priority levels.  Name
    arithmetic arguments in 'runq.h' for better immediate reference.
    
    Use plain integers to pass indices instead of 'u_char' (using the latter
    probably doesn't bring any gain, and an 'int' makes the API agnostic to
    a number of queues greater than 256).  Add a static assertion that
    RQ_NQS can't be strictly greater than 256 as long as the 'td_rqindex'
    thread field is of type 'u_char'.
    
    Add a new macro CHECK_IDX() that checks that an index is non-negative
    and below RQ_NQS, and use it in all low-level functions (and "public"
    ones when they don't need to call the former).
    
    While here, remove runq_remove_idx(), as it knows a bit too much of
    ULE's internals, in particular by treating the whole runqueue as
    round-robin, which we are going to change.  Instead, have runq_remove()
    return whether the queue from which the thread was removed is now empty,
    and leverage this information in tdq_runq_rem() (sched_ule(4)).
    
    While here, re-implement runq_add() on top of runq_add_idx() to remove
    its duplicated code (all lines except one).  Introduce the new
    RQ_PRI_TO_IDX() macro to convert a priority to a queue index, and use it
    in runq_add() (many more uses will be introduced in later commits).
    
    While here, rename runq_check() to runq_not_empty() and have it return
    a boolean instead of an 'int', and do the same for sched_runnable() as
    a consequence (and while here, fix a small style violation in
    sched_4bsd(4)'s version).
    
    While here, simplify sched_runnable().
    
    While here, make <sys/sched.h> standalone include-wise.
    
    No functional change intended.
    
    Reviewed by:    kib
    MFC after:      1 month
    Event:          Kitchener-Waterloo Hackathon 202506
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D45387
---
 sys/kern/kern_switch.c | 199 ++++++++++++++++++++++++-------------------------
 sys/kern/sched_4bsd.c  |  11 +--
 sys/kern/sched_ule.c   |  37 ++++-----
 sys/sys/runq.h         |  16 ++--
 sys/sys/sched.h        |  11 ++-
 5 files changed, 137 insertions(+), 137 deletions(-)

diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 58dfc8fc8497..5c529862bc53 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -252,6 +252,15 @@ critical_exit_KBI(void)
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests				*
  ************************************************************************/
+_Static_assert(RQ_NQS <= 256,
+    "'td_rqindex' must be turned into a bigger unsigned type");
+/* A macro instead of a function to get the proper calling function's name. */
+#define CHECK_IDX(idx) ({						\
+	__typeof(idx) _idx __unused = (idx);					\
+	KASSERT(0 <= _idx && _idx < RQ_NQS,				\
+	    ("%s: %s out of range: %d", __func__, __STRING(idx), _idx)); \
+})
+
 /*
  * Initialize a run structure.
  */
@@ -266,92 +275,93 @@ runq_init(struct runq *rq)
 }
 
 /*
- * Clear the status bit of the queue corresponding to priority level pri,
- * indicating that it is empty.
+ * Clear the status bit of the queue at index 'idx', indicating that it is
+ * empty.
  */
 static __inline void
-runq_clrbit(struct runq *rq, int pri)
+runq_clrbit(struct runq *rq, int idx)
 {
 	struct rqbits *rqb;
 
+	CHECK_IDX(idx);
 	rqb = &rq->rq_status;
 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
-	    rqb->rqb_bits[RQB_WORD(pri)],
-	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
-	    RQB_BIT(pri), RQB_WORD(pri));
-	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
+	    rqb->rqb_bits[RQB_WORD(idx)],
+	    rqb->rqb_bits[RQB_WORD(idx)] & ~RQB_BIT(idx),
+	    RQB_BIT(idx), RQB_WORD(idx));
+	rqb->rqb_bits[RQB_WORD(idx)] &= ~RQB_BIT(idx);
 }
 
 /*
  * Find the index of the first non-empty run queue.  This is done by
- * scanning the status bits, a set bit indicates a non-empty queue.
+ * scanning the status bits, a set bit indicating a non-empty queue.
  */
 static __inline int
 runq_findbit(struct runq *rq)
 {
 	struct rqbits *rqb;
-	int pri;
-	int i;
+	int idx, i;
 
 	rqb = &rq->rq_status;
 	for (i = 0; i < RQB_LEN; i++)
 		if (rqb->rqb_bits[i]) {
-			pri = RQB_FFS(rqb->rqb_bits[i]) + i * RQB_BPW;
-			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
-			    rqb->rqb_bits[i], i, pri);
-			return (pri);
+			idx = RQB_FFS(rqb->rqb_bits[i]) + i * RQB_BPW;
+			CHECK_IDX(idx);
+			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d idx=%d",
+			    rqb->rqb_bits[i], i, idx);
+			return (idx);
 		}
 
 	return (-1);
 }
 
 static __inline int
-runq_findbit_from(struct runq *rq, u_char pri)
+runq_findbit_from(struct runq *rq, int idx)
 {
 	struct rqbits *rqb;
 	rqb_word_t mask;
 	int i;
 
-	/*
-	 * Set the mask for the first word so we ignore priorities before 'pri'.
-	 */
-	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
+	CHECK_IDX(idx);
+	/* Set the mask for the first word so we ignore indices before 'idx'. */
+	mask = (rqb_word_t)-1 << (idx & (RQB_BPW - 1));
 	rqb = &rq->rq_status;
 again:
-	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
+	for (i = RQB_WORD(idx); i < RQB_LEN; mask = -1, i++) {
 		mask = rqb->rqb_bits[i] & mask;
 		if (mask == 0)
 			continue;
-		pri = RQB_FFS(mask) + i * RQB_BPW;
-		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
-		    mask, i, pri);
-		return (pri);
+		idx = RQB_FFS(mask) + i * RQB_BPW;
+		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d idx=%d",
+		    mask, i, idx);
+		return (idx);
 	}
-	if (pri == 0)
+	if (idx == 0)
 		return (-1);
 	/*
 	 * Wrap back around to the beginning of the list just once so we
 	 * scan the whole thing.
 	 */
-	pri = 0;
+	idx = 0;
 	goto again;
 }
 
 /*
- * Set the status bit of the queue corresponding to priority level pri,
- * indicating that it is non-empty.
+ * Set the status bit of the queue at index 'idx', indicating that it is
+ * non-empty.
  */
 static __inline void
-runq_setbit(struct runq *rq, int pri)
+runq_setbit(struct runq *rq, int idx)
 {
 	struct rqbits *rqb;
 
+	CHECK_IDX(idx);
 	rqb = &rq->rq_status;
 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
-	    rqb->rqb_bits[RQB_WORD(pri)],
-	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
-	    RQB_BIT(pri), RQB_WORD(pri));
-	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
+	    rqb->rqb_bits[RQB_WORD(idx)],
+	    rqb->rqb_bits[RQB_WORD(idx)] | RQB_BIT(idx),
+	    RQB_BIT(idx), RQB_WORD(idx));
+	rqb->rqb_bits[RQB_WORD(idx)] |= RQB_BIT(idx);
 }
 
 /*
@@ -361,46 +371,38 @@ runq_setbit(struct runq *rq, int pri)
 void
 runq_add(struct runq *rq, struct thread *td, int flags)
 {
-	struct rqhead *rqh;
-	int pri;
-
-	pri = td->td_priority / RQ_PPQ;
-	td->td_rqindex = pri;
-	runq_setbit(rq, pri);
-	rqh = &rq->rq_queues[pri];
-	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
-	    td, td->td_priority, pri, rqh);
-	if (flags & SRQ_PREEMPTED) {
-		TAILQ_INSERT_HEAD(rqh, td, td_runq);
-	} else {
-		TAILQ_INSERT_TAIL(rqh, td, td_runq);
-	}
+
+	runq_add_idx(rq, td, RQ_PRI_TO_IDX(td->td_priority), flags);
 }
 
 void
-runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
+runq_add_idx(struct runq *rq, struct thread *td, int idx, int flags)
 {
 	struct rqhead *rqh;
 
-	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
-	td->td_rqindex = pri;
-	runq_setbit(rq, pri);
-	rqh = &rq->rq_queues[pri];
-	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
-	    td, td->td_priority, pri, rqh);
+	/*
+	 * runq_setbit() asserts 'idx' is non-negative and below 'RQ_NQS', and
+	 * a static assert earlier in this file ensures that 'RQ_NQS' is no more
+	 * than 256.
+	 */
+	td->td_rqindex = idx;
+	runq_setbit(rq, idx);
+	rqh = &rq->rq_queues[idx];
+	CTR4(KTR_RUNQ, "runq_add_idx: td=%p pri=%d idx=%d rqh=%p",
+	    td, td->td_priority, idx, rqh);
 	if (flags & SRQ_PREEMPTED) {
 		TAILQ_INSERT_HEAD(rqh, td, td_runq);
 	} else {
 		TAILQ_INSERT_TAIL(rqh, td, td_runq);
 	}
 }
+
 /*
- * Return true if there are runnable processes of any priority on the run
- * queue, false otherwise.  Has no side effects, does not modify the run
- * queue structure.
+ * Return true if there are some processes of any priority on the run queue,
+ * false otherwise.  Has no side effects.
  */
-int
-runq_check(struct runq *rq)
+bool
+runq_not_empty(struct runq *rq)
 {
 	struct rqbits *rqb;
 	int i;
@@ -408,13 +410,13 @@ runq_check(struct runq *rq)
 	rqb = &rq->rq_status;
 	for (i = 0; i < RQB_LEN; i++)
 		if (rqb->rqb_bits[i]) {
-			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
+			CTR2(KTR_RUNQ, "runq_not_empty: bits=%#x i=%d",
 			    rqb->rqb_bits[i], i);
-			return (1);
+			return (true);
 		}
-	CTR0(KTR_RUNQ, "runq_check: empty");
+	CTR0(KTR_RUNQ, "runq_not_empty: empty");
 
-	return (0);
+	return (false);
 }
 
 /*
@@ -425,10 +427,10 @@ runq_choose_fuzz(struct runq *rq, int fuzz)
 {
 	struct rqhead *rqh;
 	struct thread *td;
-	int pri;
+	int idx;
 
-	while ((pri = runq_findbit(rq)) != -1) {
-		rqh = &rq->rq_queues[pri];
+	while ((idx = runq_findbit(rq)) != -1) {
+		rqh = &rq->rq_queues[idx];
 		/* fuzz == 1 is normal.. 0 or less are ignored */
 		if (fuzz > 1) {
 			/*
@@ -451,10 +453,10 @@ runq_choose_fuzz(struct runq *rq, int fuzz)
 			td = TAILQ_FIRST(rqh);
 		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
+		    "runq_choose_fuzz: idx=%d thread=%p rqh=%p", idx, td, rqh);
 		return (td);
 	}
-	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);
+	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc idx=%d", idx);
 
 	return (NULL);
 }
@@ -467,71 +469,64 @@ runq_choose(struct runq *rq)
 {
 	struct rqhead *rqh;
 	struct thread *td;
-	int pri;
+	int idx;
 
-	while ((pri = runq_findbit(rq)) != -1) {
-		rqh = &rq->rq_queues[pri];
+	while ((idx = runq_findbit(rq)) != -1) {
+		rqh = &rq->rq_queues[idx];
 		td = TAILQ_FIRST(rqh);
 		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
+		    "runq_choose: idx=%d thread=%p rqh=%p", idx, td, rqh);
 		return (td);
 	}
-	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);
+	CTR1(KTR_RUNQ, "runq_choose: idlethread idx=%d", idx);
 
 	return (NULL);
 }
 
 struct thread *
-runq_choose_from(struct runq *rq, u_char idx)
+runq_choose_from(struct runq *rq, int from_idx)
 {
 	struct rqhead *rqh;
 	struct thread *td;
-	int pri;
+	int idx;
 
-	if ((pri = runq_findbit_from(rq, idx)) != -1) {
-		rqh = &rq->rq_queues[pri];
+	if ((idx = runq_findbit_from(rq, from_idx)) != -1) {
+		rqh = &rq->rq_queues[idx];
 		td = TAILQ_FIRST(rqh);
 		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
 		CTR4(KTR_RUNQ,
-		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
-		    pri, td, td->td_rqindex, rqh);
+		    "runq_choose_from: idx=%d thread=%p idx=%d rqh=%p",
+		    idx, td, td->td_rqindex, rqh);
 		return (td);
 	}
-	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);
+	CTR1(KTR_RUNQ, "runq_choose_from: idlethread idx=%d", idx);
 
 	return (NULL);
 }
 /*
  * Remove the thread from the queue specified by its priority, and clear the
  * corresponding status bit if the queue becomes empty.
- * Caller must set state afterwards.
+ *
+ * Returns whether the corresponding queue is empty after removal.
  */
-void
+bool
 runq_remove(struct runq *rq, struct thread *td)
-{
-
-	runq_remove_idx(rq, td, NULL);
-}
-
-void
-runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
 {
 	struct rqhead *rqh;
-	u_char pri;
-
-	KASSERT(td->td_flags & TDF_INMEM,
-		("runq_remove_idx: thread swapped out"));
-	pri = td->td_rqindex;
-	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
-	rqh = &rq->rq_queues[pri];
-	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
-	    td, td->td_priority, pri, rqh);
+	int idx;
+
+	KASSERT(td->td_flags & TDF_INMEM, ("runq_remove: Thread swapped out"));
+	idx = td->td_rqindex;
+	CHECK_IDX(idx);
+	rqh = &rq->rq_queues[idx];
+	CTR4(KTR_RUNQ, "runq_remove: td=%p pri=%d idx=%d rqh=%p",
+	    td, td->td_priority, idx, rqh);
 	TAILQ_REMOVE(rqh, td, td_runq);
 	if (TAILQ_EMPTY(rqh)) {
-		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
-		runq_clrbit(rq, pri);
-		if (idx != NULL && *idx == pri)
-			*idx = (pri + 1) % RQ_NQS;
+		runq_clrbit(rq, idx);
+		CTR1(KTR_RUNQ, "runq_remove: queue at idx=%d now empty", idx);
+		return (true);
 	}
+	return (false);
 }
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 133ea4cbf30d..ef49832da017 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -684,13 +684,14 @@ schedinit_ap(void)
 	/* Nothing needed. */
 }
 
-int
+bool
 sched_runnable(void)
 {
 #ifdef SMP
-	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
+	return (runq_not_empty(&runq) ||
+	    runq_not_empty(&runq_pcpu[PCPU_GET(cpuid)]));
 #else
-	return runq_check(&runq);
+	return (runq_not_empty(&runq));
 #endif
 }
 
@@ -872,7 +873,7 @@ sched_priority(struct thread *td, u_char prio)
 	if (td->td_priority == prio)
 		return;
 	td->td_priority = prio;
-	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
+	if (TD_ON_RUNQ(td) && td->td_rqindex != RQ_PRI_TO_IDX(prio)) {
 		sched_rem(td);
 		sched_add(td, SRQ_BORING | SRQ_HOLDTD);
 	}
@@ -1680,7 +1681,7 @@ sched_idletd(void *dummy)
 	for (;;) {
 		mtx_assert(&Giant, MA_NOTOWNED);
 
-		while (sched_runnable() == 0) {
+		while (!sched_runnable()) {
 			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
 			stat->idlecalls++;
 		}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 7d1e188d0b8f..cd508d12395e 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -514,14 +514,14 @@ tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 				pri = (unsigned char)(pri - 1) % RQ_NQS;
 		} else
 			pri = tdq->tdq_ridx;
-		runq_add_pri(ts->ts_runq, td, pri, flags);
+		runq_add_idx(ts->ts_runq, td, pri, flags);
 		return;
 	} else
 		ts->ts_runq = &tdq->tdq_idle;
 	runq_add(ts->ts_runq, td, flags);
 }
 
-/* 
+/*
  * Remove a thread from a run-queue.  This typically happens when a thread
  * is selected to run.  Running threads are not on the queue and the
  * transferable count does not reflect them.
@@ -530,6 +530,7 @@ static __inline void
 tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
 	struct td_sched *ts;
+	bool queue_empty;
 
 	ts = td_get_sched(td);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
@@ -540,13 +541,16 @@ tdq_runq_rem(struct tdq *tdq, struct thread *td)
 		tdq->tdq_transferable--;
 		ts->ts_flags &= ~TSF_XFERABLE;
 	}
-	if (ts->ts_runq == &tdq->tdq_timeshare) {
-		if (tdq->tdq_idx != tdq->tdq_ridx)
-			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
-		else
-			runq_remove_idx(ts->ts_runq, td, NULL);
-	} else
-		runq_remove(ts->ts_runq, td);
+	queue_empty = runq_remove(ts->ts_runq, td);
+	/*
+	 * If thread has a batch priority and the queue from which it was
+	 * removed is now empty, advance the batch's queue removal index if it
+	 * lags with respect to the batch's queue insertion index.
+	 */
+	if (queue_empty && PRI_MIN_BATCH <= td->td_priority &&
+	    td->td_priority <= PRI_MAX_BATCH &&
+	    tdq->tdq_idx != tdq->tdq_ridx && tdq->tdq_ridx == td->td_rqindex)
+		tdq->tdq_ridx = (tdq->tdq_ridx + 1) % RQ_NQS;
 }
 
 /*
@@ -2653,24 +2657,13 @@ sched_estcpu(struct thread *td __unused)
  * Return whether the current CPU has runnable tasks.  Used for in-kernel
  * cooperative idle threads.
  */
-int
+bool
 sched_runnable(void)
 {
 	struct tdq *tdq;
-	int load;
-
-	load = 1;
 
 	tdq = TDQ_SELF();
-	if ((curthread->td_flags & TDF_IDLETD) != 0) {
-		if (TDQ_LOAD(tdq) > 0)
-			goto out;
-	} else
-		if (TDQ_LOAD(tdq) - 1 > 0)
-			goto out;
-	load = 0;
-out:
-	return (load);
+	return (TDQ_LOAD(tdq) > (TD_IS_IDLETHREAD(curthread) ? 0 : 1));
 }
 
 /*
diff --git a/sys/sys/runq.h b/sys/sys/runq.h
index b9cfa847617b..693a3666d7a8 100644
--- a/sys/sys/runq.h
+++ b/sys/sys/runq.h
@@ -50,6 +50,8 @@
 typedef	unsigned long	rqb_word_t;	/* runq's status words type. */
 
 #define	RQ_NQS	(howmany(RQ_MAX_PRIO + 1, RQ_PPQ)) /* Number of run queues. */
+#define	RQ_PRI_TO_IDX(pri)	((pri) / RQ_PPQ) /* Priority to queue index. */
+
 #define	RQB_BPW	(sizeof(rqb_word_t) * NBBY) /* Bits per runq word. */
 #define	RQB_LEN	(howmany(RQ_NQS, RQB_BPW)) /* Words to cover RQ_NQS queues. */
 #define	RQB_WORD(idx)	((idx) / RQB_BPW)
@@ -58,6 +60,7 @@ typedef	unsigned long	rqb_word_t;	/* runq's status words type. */
 
 
 #ifdef _KERNEL
+#include <sys/types.h>		/* For bool. */
 #include <sys/queue.h>
 
 struct thread;
@@ -84,15 +87,14 @@ struct runq {
 	struct	rqhead rq_queues[RQ_NQS];
 };
 
-void	runq_add(struct runq *, struct thread *, int);
-void	runq_add_pri(struct runq *, struct thread *, u_char, int);
-int	runq_check(struct runq *);
+void	runq_add(struct runq *, struct thread *, int _flags);
+void	runq_add_idx(struct runq *, struct thread *, int _idx, int _flags);
+bool	runq_not_empty(struct runq *);
 struct	thread *runq_choose(struct runq *);
-struct	thread *runq_choose_from(struct runq *, u_char);
-struct	thread *runq_choose_fuzz(struct runq *, int);
+struct	thread *runq_choose_from(struct runq *, int _idx);
+struct	thread *runq_choose_fuzz(struct runq *, int _fuzz);
 void	runq_init(struct runq *);
-void	runq_remove(struct runq *, struct thread *);
-void	runq_remove_idx(struct runq *, struct thread *, u_char *);
+bool	runq_remove(struct runq *, struct thread *);
 #endif /* _KERNEL */
 
 #endif
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index f017863270b4..052b8bb85f2f 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -63,6 +63,15 @@
 #define	_SCHED_H_
 
 #ifdef _KERNEL
+
+#include <sys/types.h>
+#ifdef SCHED_STATS
+#include <sys/pcpu.h>
+#endif
+
+struct proc;
+struct thread;
+
 /*
  * General scheduling info.
  *
@@ -74,7 +83,7 @@
  */
 int	sched_load(void);
 int	sched_rr_interval(void);
-int	sched_runnable(void);
+bool	sched_runnable(void);
 
 /* 
  * Proc related scheduling hooks.



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?202506180213.55I2DBsU024029>