/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward approach; e.g., it'd be easier to
 *    bounce through per-CPU BPF queues. The current design is chosen to
 *    maximally utilize and verify various SCX mechanisms such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with the infinite slice which allows stopping the
 *    ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/timer_list.
 *
 *    Periodic switching is enforced by a periodic timer checking all CPUs and
 *    preempting them as necessary. The timer is pinned to the central CPU with
 *    BPF_F_TIMER_CPU_PIN where the kernel supports it; on older kernels the
 *    pin is dropped and the timer may run on any CPU.
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is always
 *    prioritized over user threads, which is required for ensuring forward
 *    progress as e.g. the periodic timer may run on a ksoftirqd and if the
 *    ksoftirqd gets starved by a user thread, there may not be anything else to
 *    vacate that user thread.
 *
 *    SCX_KICK_PREEMPT is used to trigger scheduling and to make CPUs move on
 *    to the next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";
enum {
	FALLBACK_DSQ_ID		= 0,
	MS_TO_NS		= 1000LLU * 1000,
	TIMER_INTERVAL_NS	= 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns;
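
/*
 * The const volatile parameters above live in the BPF object's rodata and are
 * expected to be filled in by the userspace loader before the program is
 * loaded (e.g. through a libbpf skeleton's skel->rodata fields). See the
 * illustrative loader sketch following central_ops at the bottom of this
 * file.
 */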
bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use percpu map due to bad lookups */
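/*
 * cpu_gimme_task[cpu] is set by a non-central CPU from dispatch() when it has
 * nothing to run and cleared by the central CPU once it has pushed a task to
 * that CPU's local DSQ. cpu_started_at[cpu] records when the task currently
 * on the CPU started running (0 while idle) and is read by the periodic timer
 * to detect slice exhaustion.
 */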
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);
struct central_timer {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");
s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central cpu as
	 * select_cpu() is a hint and if @p can't be on it, the kernel will
	 * automatically pick a fallback CPU.
	 */
	return central_cpu;
}
void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads which is necessary for forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);
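
	/*
	 * Ask the central CPU to run dispatch() soon so the newly queued task
	 * gets placed. If @p is still running on some CPU, that CPU will pass
	 * through dispatch() on its own once the task stops, so no kick is
	 * needed.
	 */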
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In that case,
			 * break the loop now as the next dispatch operation
			 * would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}
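
/*
 * dispatch() plays two roles: on the central CPU it drains central_q and
 * feeds every CPU whose cpu_gimme_task flag is set; on all other CPUs it only
 * pulls from the fallback DSQ, raises its own gimme flag and kicks the
 * central CPU to do the real work.
 */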
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}
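
/*
 * running()/stopping() maintain cpu_started_at so that central_timerfn() can
 * tell whether the task currently on a CPU has run past slice_ns and should
 * be preempted.
 */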
void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

	if (started_at)
		*started_at = scx_bpf_now() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

	if (started_at)
		*started_at = 0;
}
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = scx_bpf_now();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

	bpf_for(i, 0, nr_cpu_ids) {
		/* rotate the scan start so preemption kicks are spread evenly */
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    time_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

	bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type doesn't have a name
	 * and can't be used with bpf_core_enum_value_exists(). Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}
SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last task on a given CPU doesn't mean
		* anything special. Enqueue the last tasks like any other tasks.
		*/
	       .flags		= SCX_OPS_ENQ_LAST,

	       .select_cpu	= (void *)central_select_cpu,
	       .enqueue		= (void *)central_enqueue,
	       .dispatch	= (void *)central_dispatch,
	       .running		= (void *)central_running,
	       .stopping	= (void *)central_stopping,
	       .init		= (void *)central_init,
	       .exit		= (void *)central_exit,
	       .name		= "central");
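
/*
 * Illustrative userspace loading sketch, not part of this BPF object and not
 * necessarily how the accompanying loader does it. It assumes a libbpf
 * skeleton generated from this file; the skeleton header name, struct name
 * and chosen parameter values below are assumptions:
 *
 *	#include <bpf/libbpf.h>
 *	#include "scx_central.bpf.skel.h"
 *
 *	int main(void)
 *	{
 *		struct scx_central *skel = scx_central__open();
 *		struct bpf_link *link;
 *
 *		// Fill in rodata before load (error handling omitted).
 *		skel->rodata->central_cpu = 0;
 *		skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
 *		skel->rodata->slice_ns = 20ULL * 1000 * 1000;	// 20ms
 *		scx_central__load(skel);
 *
 *		// Attaching the struct_ops map enables the scheduler.
 *		link = bpf_map__attach_struct_ops(skel->maps.central_ops);
 *
 *		// ... run until done, then tear down.
 *		bpf_link__destroy(link);
 *		scx_central__destroy(skel);
 *		return 0;
 *	}
 */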