Date: Wed, 18 Jun 2025 02:13:21 GMT
From: Olivier Certner <olce@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org,
    dev-commits-src-main@FreeBSD.org
Subject: git: f4be333bc567 - main - sched_ule: Re-implement stealing on top of runq common-code
Message-ID: <202506180213.55I2DLlc024334@gitrepo.freebsd.org>

The branch main has been updated by olce:

URL: https://cgit.FreeBSD.org/src/commit/?id=f4be333bc56759d33dca49ab4d19eaf22134e844

commit f4be333bc56759d33dca49ab4d19eaf22134e844
Author:     Olivier Certner <olce@FreeBSD.org>
AuthorDate: 2024-04-29 00:55:40 +0000
Commit:     Olivier Certner <olce@FreeBSD.org>
CommitDate: 2025-06-18 02:08:01 +0000

    sched_ule: Re-implement stealing on top of runq common-code

    Stop using internal knowledge of runqueues.  Remove duplicate
    boilerplate parts.

    Concretely, runq_steal() and runq_steal_from() are now implemented on
    top of runq_findq().  Besides considerably simplifying the code, this
    change also brings an algorithmic improvement since, previously, set
    bits in the runqueue's status words were found by testing each bit
    individually in a loop instead of using ffsl()/bsfl() (except for the
    first set bit per status word).

    This change also makes it more apparent that runq_steal_from() treats
    the first thread with highest priority specifically (which
    runq_steal() does not).

    MFC after:      1 month
    Event:          Kitchener-Waterloo Hackathon 202506
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D45388
---
 sys/kern/sched_ule.c | 124 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 68 insertions(+), 56 deletions(-)

diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index c23d00fd6049..5c7665eb7add 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1183,51 +1183,68 @@ tdq_notify(struct tdq *tdq, int lowpri)
 	ipi_cpu(cpu, IPI_PREEMPT);
 }
 
-/*
- * Steals load from a timeshare queue.  Honors the rotating queue head
- * index.
- */
-static struct thread *
-runq_steal_from(struct runq *rq, int cpu, u_char start)
+struct runq_steal_pred_data {
+	struct thread *td;
+	struct thread *first;
+	int cpu;
+	bool use_first_last;
+};
+
+static bool
+runq_steal_pred(const int idx, struct rq_queue *const q, void *const data)
 {
-	struct rq_status *rqs;
-	struct rq_queue *rqq;
-	struct thread *td, *first;
-	int bit;
-	int i;
+	struct runq_steal_pred_data *const d = data;
+	struct thread *td;
 
-	rqs = &rq->rq_status;
-	bit = RQSW_BIT_IDX(start);
-	first = NULL;
-again:
-	for (i = RQSW_IDX(start); i < RQSW_NB; bit = 0, i++) {
-		if (rqs->rq_sw[i] == 0)
+	TAILQ_FOREACH(td, q, td_runq) {
+		if (d->use_first_last && d->first == NULL) {
+			d->first = td;
 			continue;
-		if (bit == 0)
-			bit = RQSW_BSF(rqs->rq_sw[i]);
-		for (; bit < RQSW_BPW; bit++) {
-			if ((rqs->rq_sw[i] & (1ul << bit)) == 0)
-				continue;
-			rqq = &rq->rq_queues[RQSW_TO_QUEUE_IDX(i, bit)];
-			TAILQ_FOREACH(td, rqq, td_runq) {
-				if (first) {
-					if (THREAD_CAN_MIGRATE(td) &&
-					    THREAD_CAN_SCHED(td, cpu))
-						return (td);
-				} else
-					first = td;
-			}
+		}
+
+		if (THREAD_CAN_MIGRATE(td) && THREAD_CAN_SCHED(td, d->cpu)) {
+			d->td = td;
+			return (true);
 		}
 	}
-	if (start != 0) {
-		start = 0;
-		goto again;
+
+	return (false);
+}
+
+/*
+ * Steals load from a timeshare queue.  Honors the rotating queue head
+ * index.
+ */
+static inline struct thread *
+runq_steal_from(struct runq *const rq, int cpu, int start_idx)
+{
+	struct runq_steal_pred_data data = {
+		.td = NULL,
+		.first = NULL,
+		.cpu = cpu,
+		.use_first_last = true
+	};
+	int idx;
+
+	idx = runq_findq(rq, start_idx, RQ_NQS - 1, &runq_steal_pred, &data);
+	if (idx != -1)
+		goto found;
+
+	MPASS(data.td == NULL);
+	if (start_idx != 0) {
+		idx = runq_findq(rq, 0, start_idx - 1, &runq_steal_pred, &data);
+		if (idx != -1)
+			goto found;
 	}
-	if (first && THREAD_CAN_MIGRATE(first) &&
-	    THREAD_CAN_SCHED(first, cpu))
-		return (first);
+
+	MPASS(idx == -1 && data.td == NULL);
+	if (data.first != NULL && THREAD_CAN_MIGRATE(data.first) &&
+	    THREAD_CAN_SCHED(data.first, cpu))
+		return (data.first);
 	return (NULL);
+found:
+	MPASS(data.td != NULL);
+	return (data.td);
 }
 
 /*
@@ -1236,26 +1253,21 @@ again:
 static struct thread *
 runq_steal(struct runq *rq, int cpu)
 {
-	struct rq_queue *rqq;
-	struct rq_status *rqs;
-	struct thread *td;
-	int word;
-	int bit;
-
-	rqs = &rq->rq_status;
-	for (word = 0; word < RQSW_NB; word++) {
-		if (rqs->rq_sw[word] == 0)
-			continue;
-		for (bit = 0; bit < RQSW_BPW; bit++) {
-			if ((rqs->rq_sw[word] & (1ul << bit)) == 0)
-				continue;
-			rqq = &rq->rq_queues[RQSW_TO_QUEUE_IDX(word, bit)];
-			TAILQ_FOREACH(td, rqq, td_runq)
-				if (THREAD_CAN_MIGRATE(td) &&
-				    THREAD_CAN_SCHED(td, cpu))
-					return (td);
-		}
+	struct runq_steal_pred_data data = {
+		.td = NULL,
+		.first = NULL,
+		.cpu = cpu,
+		.use_first_last = false
+	};
+	int idx;
+
+	idx = runq_findq(rq, 0, RQ_NQS - 1, &runq_steal_pred, &data);
+	if (idx != -1) {
+		MPASS(data.td != NULL);
+		return (data.td);
 	}
+
+	MPASS(data.td == NULL);
 	return (NULL);
 }
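
For readers without the runq common code at hand, below is a small standalone
C sketch of the two ideas the commit message leans on: locating set bits in a
status word with ffsl(3) instead of testing every bit in a loop, and letting a
caller-supplied predicate decide which non-empty queue to take, the way
runq_steal_pred() is driven in the diff above.  Everything in the sketch
(findq_sketch, above_threshold, the word count and queue layout) is an
illustrative assumption, not the actual runq_findq() implementation.

/*
 * Hypothetical sketch, not FreeBSD code: scan queues [lo, hi] whose
 * status bits are set, jumping from set bit to set bit with ffsl(3)
 * and calling a predicate for each candidate queue index.
 */
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>				/* ffsl() on FreeBSD */

#define	BPW	((int)(sizeof(long) * 8))	/* bits per status word */
#define	NWORDS	4				/* assumed word count */
#define	NQUEUES	(NWORDS * BPW)

typedef bool (*queue_pred_t)(int qidx, void *arg);

static int
findq_sketch(const unsigned long sw[NWORDS], int lo, int hi,
    queue_pred_t pred, void *arg)
{
	unsigned long w;
	int i, bit, qidx;

	for (i = lo / BPW; i <= hi / BPW; i++) {
		w = sw[i];
		/* Trim bits outside [lo, hi] in the boundary words. */
		if (i == lo / BPW)
			w &= ~0UL << (lo % BPW);
		if (i == hi / BPW && hi % BPW != BPW - 1)
			w &= (1UL << (hi % BPW + 1)) - 1;
		while (w != 0) {
			bit = ffsl((long)w) - 1;	/* next set bit, O(1) */
			qidx = i * BPW + bit;
			if (pred(qidx, arg))
				return (qidx);
			w &= w - 1;		/* clear lowest set bit */
		}
	}
	return (-1);
}

/* Example predicate: accept the first queue index at or above a threshold. */
static bool
above_threshold(int qidx, void *arg)
{
	return (qidx >= *(int *)arg);
}

int
main(void)
{
	/* Set bits at queue indices 10, 69 and 195 (with 64-bit long). */
	unsigned long sw[NWORDS] = { 1UL << 10, 1UL << 5, 0, 1UL << 3 };
	int threshold = 60;

	printf("%d\n", findq_sketch(sw, 0, NQUEUES - 1, above_threshold,
	    &threshold));			/* 69 with 64-bit long */
	return (0);
}

The rotating-head wraparound that runq_steal_from() honors maps onto two such
scans, first over [start_idx, RQ_NQS - 1] and then over [0, start_idx - 1],
which is exactly how the new code above drives runq_findq().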