1 /* SPDX-License-Identifier: GPL-2.0 */
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
9 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
12 SCX_DSP_DFL_MAX_BATCH = 32,
13 SCX_DSP_MAX_LOOPS = 32,
14 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
17 SCX_EXIT_MSG_LEN = 1024,
18 SCX_EXIT_DUMP_DFL_LEN = 32768,
20 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
23 * Iterating all tasks may take a while. Periodically drop
24 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
26 SCX_OPS_TASK_ITER_BATCH = 32,
33 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
34 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
35 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
36 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
38 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
39 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
40 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
44 * An exit code can be specified when exiting with scx_bpf_exit() or
45 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46 * respectively. The codes are 64bit of the format:
48 * Bits: [63 .. 48 47 .. 32 31 .. 0]
49 * [ SYS ACT ] [ SYS RSN ] [ USR ]
51 * SYS ACT: System-defined exit actions
52 * SYS RSN: System-defined exit reasons
53 * USR : User-defined exit codes and reasons
55 * Using the above, users may communicate intention and context by ORing system
56 * actions and/or system reasons with a user-defined exit code.
60 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
63 SCX_ECODE_ACT_RESTART = 1LLU << 48,
67 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
70 struct scx_exit_info {
71 /* %SCX_EXIT_* - broad category of the exit reason */
72 enum scx_exit_kind kind;
74 /* exit code if gracefully exiting */
77 /* textual representation of the above */
80 /* backtrace if exiting due to an error */
84 /* informational message */
91 /* sched_ext_ops.flags */
94 * Keep built-in idle tracking even if ops.update_idle() is implemented.
96 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
99 * By default, if there are no other task to run on the CPU, ext core
100 * keeps running the current task even after its slice expires. If this
101 * flag is specified, such tasks are passed to ops.enqueue() with
102 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
104 SCX_OPS_ENQ_LAST = 1LLU << 1,
107 * An exiting task may schedule after PF_EXITING is set. In such cases,
108 * bpf_task_from_pid() may not be able to find the task and if the BPF
109 * scheduler depends on pid lookup for dispatching, the task will be
110 * lost leading to various issues including RCU grace period stalls.
112 * To mask this problem, by default, unhashed tasks are automatically
113 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114 * depend on pid lookups and wants to handle these tasks directly, the
115 * following flag can be used.
117 SCX_OPS_ENQ_EXITING = 1LLU << 2,
120 * If set, only tasks with policy set to SCHED_EXT are attached to
121 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
123 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
126 * A migration disabled task can only execute on its current CPU. By
127 * default, such tasks are automatically put on the CPU's local DSQ with
128 * the default slice on enqueue. If this ops flag is set, they also go
129 * through ops.enqueue().
131 * A migration disabled task never invokes ops.select_cpu() as it can
132 * only select the current CPU. Also, p->cpus_ptr will only contain its
133 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
134 * and thus may disagree with cpumask_weight(p->cpus_ptr).
136 SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
139 * CPU cgroup support flags
141 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
143 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
145 SCX_OPS_ENQ_EXITING |
146 SCX_OPS_ENQ_MIGRATION_DISABLED |
147 SCX_OPS_SWITCH_PARTIAL |
148 SCX_OPS_HAS_CGROUP_WEIGHT,
151 /* argument container for ops.init_task() */
152 struct scx_init_task_args {
154 * Set if ops.init_task() is being invoked on the fork path, as opposed
155 * to the scheduler transition path.
158 #ifdef CONFIG_EXT_GROUP_SCHED
159 /* the cgroup the task is joining */
160 struct cgroup *cgroup;
164 /* argument container for ops.exit_task() */
165 struct scx_exit_task_args {
166 /* Whether the task exited before running on sched_ext. */
170 /* argument container for ops->cgroup_init() */
171 struct scx_cgroup_init_args {
172 /* the weight of the cgroup [1..10000] */
176 enum scx_cpu_preempt_reason {
177 /* next task is being scheduled by &sched_class_rt */
179 /* next task is being scheduled by &sched_class_dl */
181 /* next task is being scheduled by &sched_class_stop */
182 SCX_CPU_PREEMPT_STOP,
183 /* unknown reason for SCX being preempted */
184 SCX_CPU_PREEMPT_UNKNOWN,
188 * Argument container for ops->cpu_acquire(). Currently empty, but may be
189 * expanded in the future.
191 struct scx_cpu_acquire_args {};
193 /* argument container for ops->cpu_release() */
194 struct scx_cpu_release_args {
195 /* the reason the CPU was preempted */
196 enum scx_cpu_preempt_reason reason;
198 /* the task that's going to be scheduled on the CPU */
199 struct task_struct *task;
203 * Informational context provided to dump operations.
205 struct scx_dump_ctx {
206 enum scx_exit_kind kind;
214 * struct sched_ext_ops - Operation table for BPF scheduler implementation
216 * A BPF scheduler can implement an arbitrary scheduling policy by
217 * implementing and loading operations in this table. Note that a userland
218 * scheduling policy can also be implemented using the BPF scheduler
221 struct sched_ext_ops {
223 * @select_cpu: Pick the target CPU for a task which is being woken up
224 * @p: task being woken up
225 * @prev_cpu: the cpu @p was on before sleeping
226 * @wake_flags: SCX_WAKE_*
228 * Decision made here isn't final. @p may be moved to any CPU while it
229 * is getting dispatched for execution later. However, as @p is not on
230 * the rq at this point, getting the eventual execution CPU right here
231 * saves a small bit of overhead down the line.
233 * If an idle CPU is returned, the CPU is kicked and will try to
234 * dispatch. While an explicit custom mechanism can be added,
235 * select_cpu() serves as the default way to wake up idle CPUs.
237 * @p may be inserted into a DSQ directly by calling
238 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
239 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
240 * of the CPU returned by this operation.
242 * Note that select_cpu() is never called for tasks that can only run
243 * on a single CPU or tasks with migration disabled, as they don't have
244 * the option to select a different CPU. See select_task_rq() for
247 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
250 * @enqueue: Enqueue a task on the BPF scheduler
251 * @p: task being enqueued
252 * @enq_flags: %SCX_ENQ_*
254 * @p is ready to run. Insert directly into a DSQ by calling
255 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
256 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
257 * the task will stall.
259 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
262 void (*enqueue)(struct task_struct *p, u64 enq_flags);
265 * @dequeue: Remove a task from the BPF scheduler
266 * @p: task being dequeued
267 * @deq_flags: %SCX_DEQ_*
269 * Remove @p from the BPF scheduler. This is usually called to isolate
270 * the task while updating its scheduling properties (e.g. priority).
272 * The ext core keeps track of whether the BPF side owns a given task or
273 * not and can gracefully ignore spurious dispatches from BPF side,
274 * which makes it safe to not implement this method. However, depending
275 * on the scheduling logic, this can lead to confusing behaviors - e.g.
276 * scheduling position not being updated across a priority change.
278 void (*dequeue)(struct task_struct *p, u64 deq_flags);
281 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
282 * @cpu: CPU to dispatch tasks for
283 * @prev: previous task being switched out
285 * Called when a CPU's local dsq is empty. The operation should dispatch
286 * one or more tasks from the BPF scheduler into the DSQs using
287 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
288 * using scx_bpf_dsq_move_to_local().
290 * The maximum number of times scx_bpf_dsq_insert() can be called
291 * without an intervening scx_bpf_dsq_move_to_local() is specified by
292 * ops.dispatch_max_batch. See the comments on top of the two functions
295 * When not %NULL, @prev is an SCX task with its slice depleted. If
296 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
297 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
298 * ops.dispatch() returns. To keep executing @prev, return without
299 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
301 void (*dispatch)(s32 cpu, struct task_struct *prev);
304 * @tick: Periodic tick
305 * @p: task running currently
307 * This operation is called every 1/HZ seconds on CPUs which are
308 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
309 * immediate dispatch cycle on the CPU.
311 void (*tick)(struct task_struct *p);
314 * @runnable: A task is becoming runnable on its associated CPU
315 * @p: task becoming runnable
316 * @enq_flags: %SCX_ENQ_*
318 * This and the following three functions can be used to track a task's
319 * execution state transitions. A task becomes ->runnable() on a CPU,
320 * and then goes through one or more ->running() and ->stopping() pairs
321 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
322 * done running on the CPU.
324 * @p is becoming runnable on the CPU because it's
326 * - waking up (%SCX_ENQ_WAKEUP)
327 * - being moved from another CPU
328 * - being restored after temporarily taken off the queue for an
331 * This and ->enqueue() are related but not coupled. This operation
332 * notifies @p's state transition and may not be followed by ->enqueue()
333 * e.g. when @p is being dispatched to a remote CPU, or when @p is
334 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
335 * task may be ->enqueue()'d without being preceded by this operation
336 * e.g. after exhausting its slice.
338 void (*runnable)(struct task_struct *p, u64 enq_flags);
341 * @running: A task is starting to run on its associated CPU
342 * @p: task starting to run
344 * See ->runnable() for explanation on the task state notifiers.
346 void (*running)(struct task_struct *p);
349 * @stopping: A task is stopping execution
350 * @p: task stopping to run
351 * @runnable: is task @p still runnable?
353 * See ->runnable() for explanation on the task state notifiers. If
354 * !@runnable, ->quiescent() will be invoked after this operation
357 void (*stopping)(struct task_struct *p, bool runnable);
360 * @quiescent: A task is becoming not runnable on its associated CPU
361 * @p: task becoming not runnable
362 * @deq_flags: %SCX_DEQ_*
364 * See ->runnable() for explanation on the task state notifiers.
366 * @p is becoming quiescent on the CPU because it's
368 * - sleeping (%SCX_DEQ_SLEEP)
369 * - being moved to another CPU
370 * - being temporarily taken off the queue for an attribute change
373 * This and ->dequeue() are related but not coupled. This operation
374 * notifies @p's state transition and may not be preceded by ->dequeue()
375 * e.g. when @p is being dispatched to a remote CPU.
377 void (*quiescent)(struct task_struct *p, u64 deq_flags);
381 * @from: yielding task
382 * @to: optional yield target task
384 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
385 * The BPF scheduler should ensure that other available tasks are
386 * dispatched before the yielding task. Return value is ignored in this
389 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
390 * scheduler can implement the request, return %true; otherwise, %false.
392 bool (*yield)(struct task_struct *from, struct task_struct *to);
395 * @core_sched_before: Task ordering for core-sched
399 * Used by core-sched to determine the ordering between two tasks. See
400 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
403 * Both @a and @b are runnable and may or may not currently be queued on
404 * the BPF scheduler. Should return %true if @a should run before @b.
405 * %false if there's no required ordering or @b should run before @a.
407 * If not specified, the default is ordering them according to when they
410 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
413 * @set_weight: Set task weight
414 * @p: task to set weight for
415 * @weight: new weight [1..10000]
417 * Update @p's weight to @weight.
419 void (*set_weight)(struct task_struct *p, u32 weight);
422 * @set_cpumask: Set CPU affinity
423 * @p: task to set CPU affinity for
424 * @cpumask: cpumask of cpus that @p can run on
426 * Update @p's CPU affinity to @cpumask.
428 void (*set_cpumask)(struct task_struct *p,
429 const struct cpumask *cpumask);
432 * @update_idle: Update the idle state of a CPU
433 * @cpu: CPU to update the idle state for
434 * @idle: whether entering or exiting the idle state
436 * This operation is called when @rq's CPU goes or leaves the idle
437 * state. By default, implementing this operation disables the built-in
438 * idle CPU tracking and the following helpers become unavailable:
440 * - scx_bpf_select_cpu_dfl()
441 * - scx_bpf_test_and_clear_cpu_idle()
442 * - scx_bpf_pick_idle_cpu()
444 * The user also must implement ops.select_cpu() as the default
445 * implementation relies on scx_bpf_select_cpu_dfl().
447 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
450 void (*update_idle)(s32 cpu, bool idle);
453 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
454 * @cpu: The CPU being acquired by the BPF scheduler.
455 * @args: Acquire arguments, see the struct definition.
457 * A CPU that was previously released from the BPF scheduler is now once
458 * again under its control.
460 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
463 * @cpu_release: A CPU is taken away from the BPF scheduler
464 * @cpu: The CPU being released by the BPF scheduler.
465 * @args: Release arguments, see the struct definition.
467 * The specified CPU is no longer under the control of the BPF
468 * scheduler. This could be because it was preempted by a higher
469 * priority sched_class, though there may be other reasons as well. The
470 * caller should consult @args->reason to determine the cause.
472 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
475 * @init_task: Initialize a task to run in a BPF scheduler
476 * @p: task to initialize for BPF scheduling
477 * @args: init arguments, see the struct definition
479 * Either we're loading a BPF scheduler or a new task is being forked.
480 * Initialize @p for BPF scheduling. This operation may block and can
481 * be used for allocations, and is called exactly once for a task.
483 * Return 0 for success, -errno for failure. An error return while
484 * loading will abort loading of the BPF scheduler. During a fork, it
485 * will abort that specific fork.
487 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
490 * @exit_task: Exit a previously-running task from the system
492 * @args: exit arguments, see the struct definition
494 * @p is exiting or the BPF scheduler is being unloaded. Perform any
495 * necessary cleanup for @p.
497 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
500 * @enable: Enable BPF scheduling for a task
501 * @p: task to enable BPF scheduling for
503 * Enable @p for BPF scheduling. enable() is called on @p any time it
504 * enters SCX, and is always paired with a matching disable().
506 void (*enable)(struct task_struct *p);
509 * @disable: Disable BPF scheduling for a task
510 * @p: task to disable BPF scheduling for
512 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
513 * Disable BPF scheduling for @p. A disable() call is always matched
514 * with a prior enable() call.
516 void (*disable)(struct task_struct *p);
519 * @dump: Dump BPF scheduler state on error
520 * @ctx: debug dump context
522 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
524 void (*dump)(struct scx_dump_ctx *ctx);
527 * @dump_cpu: Dump BPF scheduler state for a CPU on error
528 * @ctx: debug dump context
529 * @cpu: CPU to generate debug dump for
530 * @idle: @cpu is currently idle without any runnable tasks
532 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
533 * @cpu. If @idle is %true and this operation doesn't produce any
534 * output, @cpu is skipped for dump.
536 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
539 * @dump_task: Dump BPF scheduler state for a runnable task on error
540 * @ctx: debug dump context
541 * @p: runnable task to generate debug dump for
543 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
546 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
548 #ifdef CONFIG_EXT_GROUP_SCHED
550 * @cgroup_init: Initialize a cgroup
551 * @cgrp: cgroup being initialized
552 * @args: init arguments, see the struct definition
554 * Either the BPF scheduler is being loaded or @cgrp created, initialize
555 * @cgrp for sched_ext. This operation may block.
557 * Return 0 for success, -errno for failure. An error return while
558 * loading will abort loading of the BPF scheduler. During cgroup
559 * creation, it will abort the specific cgroup creation.
561 s32 (*cgroup_init)(struct cgroup *cgrp,
562 struct scx_cgroup_init_args *args);
565 * @cgroup_exit: Exit a cgroup
566 * @cgrp: cgroup being exited
568 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
569 * @cgrp for sched_ext. This operation my block.
571 void (*cgroup_exit)(struct cgroup *cgrp);
574 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
575 * @p: task being moved
576 * @from: cgroup @p is being moved from
577 * @to: cgroup @p is being moved to
579 * Prepare @p for move from cgroup @from to @to. This operation may
580 * block and can be used for allocations.
582 * Return 0 for success, -errno for failure. An error return aborts the
585 s32 (*cgroup_prep_move)(struct task_struct *p,
586 struct cgroup *from, struct cgroup *to);
589 * @cgroup_move: Commit cgroup move
590 * @p: task being moved
591 * @from: cgroup @p is being moved from
592 * @to: cgroup @p is being moved to
594 * Commit the move. @p is dequeued during this operation.
596 void (*cgroup_move)(struct task_struct *p,
597 struct cgroup *from, struct cgroup *to);
600 * @cgroup_cancel_move: Cancel cgroup move
601 * @p: task whose cgroup move is being canceled
602 * @from: cgroup @p was being moved from
603 * @to: cgroup @p was being moved to
605 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
606 * Undo the preparation.
608 void (*cgroup_cancel_move)(struct task_struct *p,
609 struct cgroup *from, struct cgroup *to);
612 * @cgroup_set_weight: A cgroup's weight is being changed
613 * @cgrp: cgroup whose weight is being updated
614 * @weight: new weight [1..10000]
616 * Update @tg's weight to @weight.
618 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
619 #endif /* CONFIG_EXT_GROUP_SCHED */
622 * All online ops must come before ops.cpu_online().
626 * @cpu_online: A CPU became online
627 * @cpu: CPU which just came up
629 * @cpu just came online. @cpu will not call ops.enqueue() or
630 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
632 void (*cpu_online)(s32 cpu);
635 * @cpu_offline: A CPU is going offline
636 * @cpu: CPU which is going offline
638 * @cpu is going offline. @cpu will not call ops.enqueue() or
639 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
641 void (*cpu_offline)(s32 cpu);
644 * All CPU hotplug ops must come before ops.init().
648 * @init: Initialize the BPF scheduler
653 * @exit: Clean up after the BPF scheduler
656 * ops.exit() is also called on ops.init() failure, which is a bit
657 * unusual. This is to allow rich reporting through @info on how
660 void (*exit)(struct scx_exit_info *info);
663 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
665 u32 dispatch_max_batch;
668 * @flags: %SCX_OPS_* flags
673 * @timeout_ms: The maximum amount of time, in milliseconds, that a
674 * runnable task should be able to wait before being scheduled. The
675 * maximum timeout may not exceed the default timeout of 30 seconds.
677 * Defaults to the maximum allowed timeout value of 30 seconds.
682 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
683 * value of 32768 is used.
688 * @hotplug_seq: A sequence number that may be set by the scheduler to
689 * detect when a hotplug event has occurred during the loading process.
690 * If 0, no detection occurs. Otherwise, the scheduler will fail to
691 * load if the sequence number does not match @scx_hotplug_seq on the
697 * @name: BPF scheduler's name
699 * Must be a non-zero valid BPF object name including only isalnum(),
700 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
701 * BPF scheduler is enabled.
703 char name[SCX_OPS_NAME_LEN];
708 SCX_OPI_NORMAL_BEGIN = 0,
709 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
710 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
711 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
712 SCX_OPI_END = SCX_OP_IDX(init),
715 enum scx_wake_flags {
716 /* expose select WF_* flags as enums */
717 SCX_WAKE_FORK = WF_FORK,
718 SCX_WAKE_TTWU = WF_TTWU,
719 SCX_WAKE_SYNC = WF_SYNC,
723 /* expose select ENQUEUE_* flags as enums */
724 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
725 SCX_ENQ_HEAD = ENQUEUE_HEAD,
726 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
728 /* high 32bits are SCX specific */
731 * Set the following to trigger preemption when calling
732 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
733 * current task is cleared to zero and the CPU is kicked into the
734 * scheduling path. Implies %SCX_ENQ_HEAD.
736 SCX_ENQ_PREEMPT = 1LLU << 32,
739 * The task being enqueued was previously enqueued on the current CPU's
740 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
741 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
742 * invoked in a ->cpu_release() callback, and the task is again
743 * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the
744 * task will not be scheduled on the CPU until at least the next invocation
745 * of the ->cpu_acquire() callback.
747 SCX_ENQ_REENQ = 1LLU << 40,
750 * The task being enqueued is the only task available for the cpu. By
751 * default, ext core keeps executing such tasks but when
752 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
753 * %SCX_ENQ_LAST flag set.
755 * The BPF scheduler is responsible for triggering a follow-up
756 * scheduling event. Otherwise, Execution may stall.
758 SCX_ENQ_LAST = 1LLU << 41,
760 /* high 8 bits are internal */
761 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
763 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
764 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
768 /* expose select DEQUEUE_* flags as enums */
769 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
771 /* high 32bits are SCX specific */
774 * The generic core-sched layer decided to execute the task even though
775 * it hasn't been dispatched yet. Dequeue from the BPF side.
777 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
780 enum scx_pick_idle_cpu_flags {
781 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
784 enum scx_kick_flags {
786 * Kick the target CPU if idle. Guarantees that the target CPU goes
787 * through at least one full scheduling cycle before going idle. If the
788 * target CPU can be determined to be currently not idle and going to go
789 * through a scheduling cycle before going idle, noop.
791 SCX_KICK_IDLE = 1LLU << 0,
794 * Preempt the current task and execute the dispatch path. If the
795 * current task of the target CPU is an SCX task, its ->scx.slice is
796 * cleared to zero before the scheduling path is invoked so that the
797 * task expires and the dispatch path is invoked.
799 SCX_KICK_PREEMPT = 1LLU << 1,
802 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
803 * return after the target CPU finishes picking the next task.
805 SCX_KICK_WAIT = 1LLU << 2,
809 SCX_TG_ONLINE = 1U << 0,
810 SCX_TG_INITED = 1U << 1,
813 enum scx_ops_enable_state {
820 static const char *scx_ops_enable_state_str[] = {
821 [SCX_OPS_ENABLING] = "enabling",
822 [SCX_OPS_ENABLED] = "enabled",
823 [SCX_OPS_DISABLING] = "disabling",
824 [SCX_OPS_DISABLED] = "disabled",
828 * sched_ext_entity->ops_state
830 * Used to track the task ownership between the SCX core and the BPF scheduler.
831 * State transitions look as follows:
833 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
836 * \-------------------------------/
838 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
839 * sites for explanations on the conditions being waited upon and why they are
840 * safe. Transitions out of them into NONE or QUEUED must store_release and the
841 * waiters should load_acquire.
843 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
844 * any given task can be dispatched by the BPF scheduler at all times and thus
845 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
846 * to try to dispatch any task anytime regardless of its state as the SCX core
847 * can safely reject invalid dispatches.
850 SCX_OPSS_NONE, /* owned by the SCX core */
851 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
852 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
853 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
856 * QSEQ brands each QUEUED instance so that, when dispatch races
857 * dequeue/requeue, the dispatcher can tell whether it still has a claim
858 * on the task being dispatched.
860 * As some 32bit archs can't do 64bit store_release/load_acquire,
861 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
862 * 32bit machines. The dispatch race window QSEQ protects is very narrow
863 * and runs with IRQ disabled. 30 bits should be sufficient.
865 SCX_OPSS_QSEQ_SHIFT = 2,
868 /* Use macros to ensure that the type is unsigned long for the masks */
869 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
870 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
873 * During exit, a task may schedule after losing its PIDs. When disabling the
874 * BPF scheduler, we need to be able to iterate tasks in every state to
875 * guarantee system safety. Maintain a dedicated task list which contains every
876 * task between its fork and eventual free.
878 static DEFINE_SPINLOCK(scx_tasks_lock);
879 static LIST_HEAD(scx_tasks);
881 /* ops enable/disable */
882 static struct kthread_worker *scx_ops_helper;
883 static DEFINE_MUTEX(scx_ops_enable_mutex);
884 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
885 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
886 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
887 static unsigned long scx_in_softlockup;
888 static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
889 static int scx_ops_bypass_depth;
890 static bool scx_ops_init_task_enabled;
891 static bool scx_switching_all;
892 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
894 static struct sched_ext_ops scx_ops;
895 static bool scx_warned_zero_slice;
897 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
898 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
899 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_migration_disabled);
900 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
901 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
904 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
905 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
908 static struct static_key_false scx_has_op[SCX_OPI_END] =
909 { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
911 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
912 static struct scx_exit_info *scx_exit_info;
914 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
915 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
918 * A monotically increasing sequence number that is incremented every time a
919 * scheduler is enabled. This can be used by to check if any custom sched_ext
920 * scheduler has ever been used in the system.
922 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
925 * The maximum amount of time in jiffies that a task may be runnable without
926 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
929 static unsigned long scx_watchdog_timeout;
932 * The last time the delayed work was run. This delayed work relies on
933 * ksoftirqd being able to run to service timer interrupts, so it's possible
934 * that this work itself could get wedged. To account for this, we check that
935 * it's not stalled in the timer tick, and trigger an error if it is.
937 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
939 static struct delayed_work scx_watchdog_work;
943 #ifdef CONFIG_CPUMASK_OFFSTACK
944 #define CL_ALIGNED_IF_ONSTACK
946 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
952 } idle_masks CL_ALIGNED_IF_ONSTACK;
954 #endif /* CONFIG_SMP */
956 /* for %SCX_KICK_WAIT */
957 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
960 * Direct dispatch marker.
962 * Non-NULL values are used for direct dispatch from enqueue path. A valid
963 * pointer points to the task currently being enqueued. An ERR_PTR value is used
964 * to indicate that direct dispatch has already happened.
966 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
971 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
972 * to avoid live-locking in bypass mode where all tasks are dispatched to
973 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
974 * sufficient, it can be further split.
976 static struct scx_dispatch_q **global_dsqs;
978 static const struct rhashtable_params dsq_hash_params = {
979 .key_len = sizeof_field(struct scx_dispatch_q, id),
980 .key_offset = offsetof(struct scx_dispatch_q, id),
981 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
984 static struct rhashtable dsq_hash;
985 static LLIST_HEAD(dsqs_to_free);
988 struct scx_dsp_buf_ent {
989 struct task_struct *task;
995 static u32 scx_dsp_max_batch;
1001 struct scx_dsp_buf_ent buf[];
1004 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
1006 /* string formatting from BPF */
1007 struct scx_bstr_buf {
1008 u64 data[MAX_BPRINTF_VARARGS];
1009 char line[SCX_EXIT_MSG_LEN];
1012 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
1013 static struct scx_bstr_buf scx_exit_bstr_buf;
1015 /* ops debug dump */
1016 struct scx_dump_data {
1022 struct scx_bstr_buf buf;
1025 static struct scx_dump_data scx_dump_data = {
1029 /* /sys/kernel/sched_ext interface */
1030 static struct kset *scx_kset;
1031 static struct kobject *scx_root_kobj;
1033 #define CREATE_TRACE_POINTS
1034 #include <trace/events/sched_ext.h>
1036 static void process_ddsp_deferred_locals(struct rq *rq);
1037 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1038 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1040 const char *fmt, ...);
1042 #define scx_ops_error_kind(err, fmt, args...) \
1043 scx_ops_exit_kind((err), 0, fmt, ##args)
1045 #define scx_ops_exit(code, fmt, args...) \
1046 scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1048 #define scx_ops_error(fmt, args...) \
1049 scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1051 #define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1053 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1055 if (time_after(at, now))
1056 return jiffies_to_msecs(at - now);
1058 return -(long)jiffies_to_msecs(now - at);
1061 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1062 static u32 higher_bits(u32 flags)
1064 return ~((1 << fls(flags)) - 1);
1067 /* return the mask with only the highest bit set */
1068 static u32 highest_bit(u32 flags)
1070 int bit = fls(flags);
1071 return ((u64)1 << bit) >> 1;
1074 static bool u32_before(u32 a, u32 b)
1076 return (s32)(a - b) < 0;
1079 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1081 return global_dsqs[cpu_to_node(task_cpu(p))];
1084 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1086 return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1090 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1091 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1092 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1093 * whether it's running from an allowed context.
1095 * @mask is constant, always inline to cull the mask calculations.
1097 static __always_inline void scx_kf_allow(u32 mask)
1099 /* nesting is allowed only in increasing scx_kf_mask order */
1100 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1101 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1102 current->scx.kf_mask, mask);
1103 current->scx.kf_mask |= mask;
1107 static void scx_kf_disallow(u32 mask)
1110 current->scx.kf_mask &= ~mask;
1113 #define SCX_CALL_OP(mask, op, args...) \
1116 scx_kf_allow(mask); \
1118 scx_kf_disallow(mask); \
1124 #define SCX_CALL_OP_RET(mask, op, args...) \
1126 __typeof__(scx_ops.op(args)) __ret; \
1128 scx_kf_allow(mask); \
1129 __ret = scx_ops.op(args); \
1130 scx_kf_disallow(mask); \
1132 __ret = scx_ops.op(args); \
1138 * Some kfuncs are allowed only on the tasks that are subjects of the
1139 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1140 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1141 * invoking scx_ops operations that take task arguments. These can only be used
1142 * for non-nesting operations due to the way the tasks are tracked.
1144 * kfuncs which can only operate on such tasks can in turn use
1145 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1146 * the specific task.
1148 #define SCX_CALL_OP_TASK(mask, op, task, args...) \
1150 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1151 current->scx.kf_tasks[0] = task; \
1152 SCX_CALL_OP(mask, op, task, ##args); \
1153 current->scx.kf_tasks[0] = NULL; \
1156 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
1158 __typeof__(scx_ops.op(task, ##args)) __ret; \
1159 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1160 current->scx.kf_tasks[0] = task; \
1161 __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
1162 current->scx.kf_tasks[0] = NULL; \
1166 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
1168 __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
1169 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1170 current->scx.kf_tasks[0] = task0; \
1171 current->scx.kf_tasks[1] = task1; \
1172 __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
1173 current->scx.kf_tasks[0] = NULL; \
1174 current->scx.kf_tasks[1] = NULL; \
1178 /* @mask is constant, always inline to cull unnecessary branches */
1179 static __always_inline bool scx_kf_allowed(u32 mask)
1181 if (unlikely(!(current->scx.kf_mask & mask))) {
1182 scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1183 mask, current->scx.kf_mask);
1188 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1189 * DISPATCH must not be called if we're running DEQUEUE which is nested
1190 * inside ops.dispatch(). We don't need to check boundaries for any
1191 * blocking kfuncs as the verifier ensures they're only called from
1194 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1195 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1196 scx_ops_error("cpu_release kfunc called from a nested operation");
1200 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1201 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1202 scx_ops_error("dispatch kfunc called from a nested operation");
1209 /* see SCX_CALL_OP_TASK() */
1210 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1211 struct task_struct *p)
1213 if (!scx_kf_allowed(mask))
1216 if (unlikely((p != current->scx.kf_tasks[0] &&
1217 p != current->scx.kf_tasks[1]))) {
1218 scx_ops_error("called on a task not being operated on");
1225 static bool scx_kf_allowed_if_unlocked(void)
1227 return !current->scx.kf_mask;
1231 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1232 * @dsq: user dsq being iterated
1233 * @cur: current position, %NULL to start iteration
1234 * @rev: walk backwards
1236 * Returns %NULL when iteration is finished.
1238 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1239 struct task_struct *cur, bool rev)
1241 struct list_head *list_node;
1242 struct scx_dsq_list_node *dsq_lnode;
1244 lockdep_assert_held(&dsq->lock);
1247 list_node = &cur->scx.dsq_list.node;
1249 list_node = &dsq->list;
1251 /* find the next task, need to skip BPF iteration cursors */
1254 list_node = list_node->prev;
1256 list_node = list_node->next;
1258 if (list_node == &dsq->list)
1261 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1263 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1265 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1268 #define nldsq_for_each_task(p, dsq) \
1269 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1270 (p) = nldsq_next_task((dsq), (p), false))
1274 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1275 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1276 * changes without breaking backward compatibility. Can be used with
1277 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1279 enum scx_dsq_iter_flags {
1280 /* iterate in the reverse dispatch order */
1281 SCX_DSQ_ITER_REV = 1U << 16,
1283 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1284 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1286 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1287 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1288 __SCX_DSQ_ITER_HAS_SLICE |
1289 __SCX_DSQ_ITER_HAS_VTIME,
1292 struct bpf_iter_scx_dsq_kern {
1293 struct scx_dsq_list_node cursor;
1294 struct scx_dispatch_q *dsq;
1297 } __attribute__((aligned(8)));
1299 struct bpf_iter_scx_dsq {
1301 } __attribute__((aligned(8)));
1305 * SCX task iterator.
1307 struct scx_task_iter {
1308 struct sched_ext_entity cursor;
1309 struct task_struct *locked;
1316 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1317 * @iter: iterator to init
1319 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1320 * must eventually be stopped with scx_task_iter_stop().
1322 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1323 * between this and the first next() call or between any two next() calls. If
1324 * the locks are released between two next() calls, the caller is responsible
1325 * for ensuring that the task being iterated remains accessible either through
1326 * RCU read lock or obtaining a reference count.
1328 * All tasks which existed when the iteration started are guaranteed to be
1329 * visited as long as they still exist.
1331 static void scx_task_iter_start(struct scx_task_iter *iter)
1333 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1334 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1336 spin_lock_irq(&scx_tasks_lock);
1338 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1339 list_add(&iter->cursor.tasks_node, &scx_tasks);
1340 iter->locked = NULL;
1344 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1347 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1348 iter->locked = NULL;
1353 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1354 * @iter: iterator to unlock
1356 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1357 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1358 * This function can be safely called anytime during an iteration.
1360 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1362 __scx_task_iter_rq_unlock(iter);
1363 spin_unlock_irq(&scx_tasks_lock);
1367 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1368 * @iter: iterator to re-lock
1370 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1371 * doesn't re-lock the rq lock. Must be called before other iterator operations.
1373 static void scx_task_iter_relock(struct scx_task_iter *iter)
1375 spin_lock_irq(&scx_tasks_lock);
1379 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1380 * @iter: iterator to exit
1382 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1383 * which is released on return. If the iterator holds a task's rq lock, that rq
1384 * lock is also released. See scx_task_iter_start() for details.
1386 static void scx_task_iter_stop(struct scx_task_iter *iter)
1388 list_del_init(&iter->cursor.tasks_node);
1389 scx_task_iter_unlock(iter);
1393 * scx_task_iter_next - Next task
1394 * @iter: iterator to walk
1396 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1397 * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1398 * stalls by holding scx_tasks_lock for too long.
1400 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1402 struct list_head *cursor = &iter->cursor.tasks_node;
1403 struct sched_ext_entity *pos;
1405 if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1406 scx_task_iter_unlock(iter);
1408 scx_task_iter_relock(iter);
1411 list_for_each_entry(pos, cursor, tasks_node) {
1412 if (&pos->tasks_node == &scx_tasks)
1414 if (!(pos->flags & SCX_TASK_CURSOR)) {
1415 list_move(cursor, &pos->tasks_node);
1416 return container_of(pos, struct task_struct, scx);
1420 /* can't happen, should always terminate at scx_tasks above */
1425 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1426 * @iter: iterator to walk
1428 * Visit the non-idle task with its rq lock held. Allows callers to specify
1429 * whether they would like to filter out dead tasks. See scx_task_iter_start()
1432 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1434 struct task_struct *p;
1436 __scx_task_iter_rq_unlock(iter);
1438 while ((p = scx_task_iter_next(iter))) {
1440 * scx_task_iter is used to prepare and move tasks into SCX
1441 * while loading the BPF scheduler and vice-versa while
1442 * unloading. The init_tasks ("swappers") should be excluded
1443 * from the iteration because:
1445 * - It's unsafe to use __setschduler_prio() on an init_task to
1446 * determine the sched_class to use as it won't preserve its
1449 * - ops.init/exit_task() can easily be confused if called with
1450 * init_tasks as they, e.g., share PID 0.
1452 * As init_tasks are never scheduled through SCX, they can be
1453 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1454 * doesn't work here:
1456 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1459 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1460 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1462 * Test for idle_sched_class as only init_tasks are on it.
1464 if (p->sched_class != &idle_sched_class)
1470 iter->rq = task_rq_lock(p, &iter->rf);
1476 static enum scx_ops_enable_state scx_ops_enable_state(void)
1478 return atomic_read(&scx_ops_enable_state_var);
1481 static enum scx_ops_enable_state
1482 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1484 return atomic_xchg(&scx_ops_enable_state_var, to);
1487 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1488 enum scx_ops_enable_state from)
1492 return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1495 static bool scx_rq_bypassing(struct rq *rq)
1497 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1501 * wait_ops_state - Busy-wait the specified ops state to end
1503 * @opss: state to wait the end of
1505 * Busy-wait for @p to transition out of @opss. This can only be used when the
1506 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1507 * has load_acquire semantics to ensure that the caller can see the updates made
1508 * in the enqueueing and dispatching paths.
1510 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1514 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1518 * ops_cpu_valid - Verify a cpu number
1519 * @cpu: cpu number which came from a BPF ops
1520 * @where: extra information reported on error
1522 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1523 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1526 static bool ops_cpu_valid(s32 cpu, const char *where)
1528 if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1531 scx_ops_error("invalid CPU %d%s%s", cpu,
1532 where ? " " : "", where ?: "");
1538 * ops_sanitize_err - Sanitize a -errno value
1539 * @ops_name: operation to blame on failure
1540 * @err: -errno value to sanitize
1542 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1543 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1544 * cause misbehaviors. For an example, a large negative return from
1545 * ops.init_task() triggers an oops when passed up the call chain because the
1546 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1547 * handled as a pointer.
1549 static int ops_sanitize_err(const char *ops_name, s32 err)
1551 if (err < 0 && err >= -MAX_ERRNO)
1554 scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1558 static void run_deferred(struct rq *rq)
1560 process_ddsp_deferred_locals(rq);
1564 static void deferred_bal_cb_workfn(struct rq *rq)
1570 static void deferred_irq_workfn(struct irq_work *irq_work)
1572 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1574 raw_spin_rq_lock(rq);
1576 raw_spin_rq_unlock(rq);
1580 * schedule_deferred - Schedule execution of deferred actions on an rq
1583 * Schedule execution of deferred actions on @rq. Must be called with @rq
1584 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1585 * can unlock @rq to e.g. migrate tasks to other rqs.
1587 static void schedule_deferred(struct rq *rq)
1589 lockdep_assert_rq_held(rq);
1593 * If in the middle of waking up a task, task_woken_scx() will be called
1594 * afterwards which will then run the deferred actions, no need to
1595 * schedule anything.
1597 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1601 * If in balance, the balance callbacks will be called before rq lock is
1602 * released. Schedule one.
1604 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1605 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1606 deferred_bal_cb_workfn);
1611 * No scheduler hooks available. Queue an irq work. They are executed on
1612 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1613 * The above WAKEUP and BALANCE paths should cover most of the cases and
1614 * the time to IRQ re-enable shouldn't be long.
1616 irq_work_queue(&rq->scx.deferred_irq_work);
1620 * touch_core_sched - Update timestamp used for core-sched task ordering
1621 * @rq: rq to read clock from, must be locked
1622 * @p: task to update the timestamp for
1624 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1625 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1626 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1629 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1631 lockdep_assert_rq_held(rq);
1633 #ifdef CONFIG_SCHED_CORE
1635 * It's okay to update the timestamp spuriously. Use
1636 * sched_core_disabled() which is cheaper than enabled().
1638 * As this is used to determine ordering between tasks of sibling CPUs,
1639 * it may be better to use per-core dispatch sequence instead.
1641 if (!sched_core_disabled())
1642 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1647 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1648 * @rq: rq to read clock from, must be locked
1649 * @p: task being dispatched
1651 * If the BPF scheduler implements custom core-sched ordering via
1652 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1653 * ordering within each local DSQ. This function is called from dispatch paths
1654 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1656 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1658 lockdep_assert_rq_held(rq);
1660 #ifdef CONFIG_SCHED_CORE
1661 if (SCX_HAS_OP(core_sched_before))
1662 touch_core_sched(rq, p);
1666 static void update_curr_scx(struct rq *rq)
1668 struct task_struct *curr = rq->curr;
1671 delta_exec = update_curr_common(rq);
1672 if (unlikely(delta_exec <= 0))
1675 if (curr->scx.slice != SCX_SLICE_INF) {
1676 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1677 if (!curr->scx.slice)
1678 touch_core_sched(rq, curr);
1682 static bool scx_dsq_priq_less(struct rb_node *node_a,
1683 const struct rb_node *node_b)
1685 const struct task_struct *a =
1686 container_of(node_a, struct task_struct, scx.dsq_priq);
1687 const struct task_struct *b =
1688 container_of(node_b, struct task_struct, scx.dsq_priq);
1690 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1693 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1695 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1696 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1699 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1702 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1704 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1705 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1706 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1709 raw_spin_lock(&dsq->lock);
1710 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1711 scx_ops_error("attempting to dispatch to a destroyed dsq");
1712 /* fall back to the global dsq */
1713 raw_spin_unlock(&dsq->lock);
1714 dsq = find_global_dsq(p);
1715 raw_spin_lock(&dsq->lock);
1719 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1720 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1722 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1723 * their FIFO queues. To avoid confusion and accidentally
1724 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1725 * disallow any internal DSQ from doing vtime ordering of
1728 scx_ops_error("cannot use vtime ordering for built-in DSQs");
1729 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1732 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1733 struct rb_node *rbp;
1736 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1737 * linked to both the rbtree and list on PRIQs, this can only be
1738 * tested easily when adding the first task.
1740 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1741 nldsq_next_task(dsq, NULL, false)))
1742 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1745 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1746 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1749 * Find the previous task and insert after it on the list so
1750 * that @dsq->list is vtime ordered.
1752 rbp = rb_prev(&p->scx.dsq_priq);
1754 struct task_struct *prev =
1755 container_of(rbp, struct task_struct,
1757 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1759 list_add(&p->scx.dsq_list.node, &dsq->list);
1762 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1763 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1764 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1767 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1768 list_add(&p->scx.dsq_list.node, &dsq->list);
1770 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1773 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1775 p->scx.dsq_seq = dsq->seq;
1781 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1782 * direct dispatch path, but we clear them here because the direct
1783 * dispatch verdict may be overridden on the enqueue path during e.g.
1786 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1787 p->scx.ddsp_enq_flags = 0;
1790 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1791 * match waiters' load_acquire.
1793 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1794 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1797 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1798 bool preempt = false;
1800 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1801 rq->curr->sched_class == &ext_sched_class) {
1802 rq->curr->scx.slice = 0;
1806 if (preempt || sched_class_above(&ext_sched_class,
1807 rq->curr->sched_class))
1810 raw_spin_unlock(&dsq->lock);
1814 static void task_unlink_from_dsq(struct task_struct *p,
1815 struct scx_dispatch_q *dsq)
1817 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1819 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1820 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1821 RB_CLEAR_NODE(&p->scx.dsq_priq);
1822 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1825 list_del_init(&p->scx.dsq_list.node);
1826 dsq_mod_nr(dsq, -1);
1829 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1831 struct scx_dispatch_q *dsq = p->scx.dsq;
1832 bool is_local = dsq == &rq->scx.local_dsq;
1836 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1837 * Unlinking is all that's needed to cancel.
1839 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1840 list_del_init(&p->scx.dsq_list.node);
1843 * When dispatching directly from the BPF scheduler to a local
1844 * DSQ, the task isn't associated with any DSQ but
1845 * @p->scx.holding_cpu may be set under the protection of
1846 * %SCX_OPSS_DISPATCHING.
1848 if (p->scx.holding_cpu >= 0)
1849 p->scx.holding_cpu = -1;
1855 raw_spin_lock(&dsq->lock);
1858 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1859 * change underneath us.
1861 if (p->scx.holding_cpu < 0) {
1862 /* @p must still be on @dsq, dequeue */
1863 task_unlink_from_dsq(p, dsq);
1866 * We're racing against dispatch_to_local_dsq() which already
1867 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1868 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1871 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1872 p->scx.holding_cpu = -1;
1877 raw_spin_unlock(&dsq->lock);
1880 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1881 struct task_struct *p)
1883 struct scx_dispatch_q *dsq;
1885 if (dsq_id == SCX_DSQ_LOCAL)
1886 return &rq->scx.local_dsq;
1888 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1889 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1891 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1892 return find_global_dsq(p);
1894 return &cpu_rq(cpu)->scx.local_dsq;
1897 if (dsq_id == SCX_DSQ_GLOBAL)
1898 dsq = find_global_dsq(p);
1900 dsq = find_user_dsq(dsq_id);
1902 if (unlikely(!dsq)) {
1903 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1904 dsq_id, p->comm, p->pid);
1905 return find_global_dsq(p);
1911 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1912 struct task_struct *p, u64 dsq_id,
1916 * Mark that dispatch already happened from ops.select_cpu() or
1917 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1918 * which can never match a valid task pointer.
1920 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1922 /* @p must match the task on the enqueue path */
1923 if (unlikely(p != ddsp_task)) {
1924 if (IS_ERR(ddsp_task))
1925 scx_ops_error("%s[%d] already direct-dispatched",
1928 scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1929 ddsp_task->comm, ddsp_task->pid,
1934 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1935 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1937 p->scx.ddsp_dsq_id = dsq_id;
1938 p->scx.ddsp_enq_flags = enq_flags;
1941 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1943 struct rq *rq = task_rq(p);
1944 struct scx_dispatch_q *dsq =
1945 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1947 touch_core_sched_dispatch(rq, p);
1949 p->scx.ddsp_enq_flags |= enq_flags;
1952 * We are in the enqueue path with @rq locked and pinned, and thus can't
1953 * double lock a remote rq and enqueue to its local DSQ. For
1954 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1955 * the enqueue so that it's executed when @rq can be unlocked.
1957 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1960 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1962 switch (opss & SCX_OPSS_STATE_MASK) {
1965 case SCX_OPSS_QUEUEING:
1967 * As @p was never passed to the BPF side, _release is
1968 * not strictly necessary. Still do it for consistency.
1970 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1973 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1974 p->comm, p->pid, opss);
1975 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1979 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1980 list_add_tail(&p->scx.dsq_list.node,
1981 &rq->scx.ddsp_deferred_locals);
1982 schedule_deferred(rq);
1986 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1989 static bool scx_rq_online(struct rq *rq)
1992 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1993 * the online state as seen from the BPF scheduler. cpu_active() test
1994 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1995 * stay set until the current scheduling operation is complete even if
1996 * we aren't locking @rq.
1998 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
2001 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
2004 struct task_struct **ddsp_taskp;
2007 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
2010 if (sticky_cpu == cpu_of(rq))
2011 goto local_norefill;
2014 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2015 * is offline and are just running the hotplug path. Don't bother the
2018 if (!scx_rq_online(rq))
2021 if (scx_rq_bypassing(rq))
2024 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2027 /* see %SCX_OPS_ENQ_EXITING */
2028 if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2029 unlikely(p->flags & PF_EXITING))
2032 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
2033 if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) &&
2034 is_migration_disabled(p))
2037 if (!SCX_HAS_OP(enqueue))
2040 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2041 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2043 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2044 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2046 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2047 WARN_ON_ONCE(*ddsp_taskp);
2050 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2053 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2057 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2058 * dequeue may be waiting. The store_release matches their load_acquire.
2060 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2064 direct_dispatch(p, enq_flags);
2069 * For task-ordering, slice refill must be treated as implying the end
2070 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2071 * higher priority it becomes from scx_prio_less()'s POV.
2073 touch_core_sched(rq, p);
2074 p->scx.slice = SCX_SLICE_DFL;
2076 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2080 touch_core_sched(rq, p); /* see the comment in local: */
2081 p->scx.slice = SCX_SLICE_DFL;
2082 dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2085 static bool task_runnable(const struct task_struct *p)
2087 return !list_empty(&p->scx.runnable_node);
2090 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2092 lockdep_assert_rq_held(rq);
2094 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2095 p->scx.runnable_at = jiffies;
2096 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2100 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2101 * appended to the runnable_list.
2103 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2106 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2108 list_del_init(&p->scx.runnable_node);
2109 if (reset_runnable_at)
2110 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2113 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2115 int sticky_cpu = p->scx.sticky_cpu;
2117 if (enq_flags & ENQUEUE_WAKEUP)
2118 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2120 enq_flags |= rq->scx.extra_enq_flags;
2122 if (sticky_cpu >= 0)
2123 p->scx.sticky_cpu = -1;
2126 * Restoring a running task will be immediately followed by
2127 * set_next_task_scx() which expects the task to not be on the BPF
2128 * scheduler as tasks can only start running through local DSQs. Force
2129 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2131 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2132 sticky_cpu = cpu_of(rq);
2134 if (p->scx.flags & SCX_TASK_QUEUED) {
2135 WARN_ON_ONCE(!task_runnable(p));
2139 set_task_runnable(rq, p);
2140 p->scx.flags |= SCX_TASK_QUEUED;
2141 rq->scx.nr_running++;
2142 add_nr_running(rq, 1);
2144 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2145 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2147 if (enq_flags & SCX_ENQ_WAKEUP)
2148 touch_core_sched(rq, p);
2150 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2152 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2155 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2159 /* dequeue is always temporary, don't reset runnable_at */
2160 clr_task_runnable(p, false);
2162 /* acquire ensures that we see the preceding updates on QUEUED */
2163 opss = atomic_long_read_acquire(&p->scx.ops_state);
2165 switch (opss & SCX_OPSS_STATE_MASK) {
2168 case SCX_OPSS_QUEUEING:
2170 * QUEUEING is started and finished while holding @p's rq lock.
2171 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2174 case SCX_OPSS_QUEUED:
2175 if (SCX_HAS_OP(dequeue))
2176 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2178 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2182 case SCX_OPSS_DISPATCHING:
2184 * If @p is being dispatched from the BPF scheduler to a DSQ,
2185 * wait for the transfer to complete so that @p doesn't get
2186 * added to its DSQ after dequeueing is complete.
2188 * As we're waiting on DISPATCHING with the rq locked, the
2189 * dispatching side shouldn't try to lock the rq while
2190 * DISPATCHING is set. See dispatch_to_local_dsq().
2192 * DISPATCHING shouldn't have qseq set and control can reach
2193 * here with NONE @opss from the above QUEUED case block.
2194 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2196 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2197 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2202 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2204 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2205 WARN_ON_ONCE(task_runnable(p));
2209 ops_dequeue(p, deq_flags);
2212 * A currently running task which is going off @rq first gets dequeued
2213 * and then stops running. As we want running <-> stopping transitions
2214 * to be contained within runnable <-> quiescent transitions, trigger
2215 * ->stopping() early here instead of in put_prev_task_scx().
2217 * @p may go through multiple stopping <-> running transitions between
2218 * here and put_prev_task_scx() if task attribute changes occur while
2219 * balance_scx() leaves @rq unlocked. However, they don't contain any
2220 * information meaningful to the BPF scheduler and can be suppressed by
2221 * skipping the callbacks if the task is !QUEUED.
2223 if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2224 update_curr_scx(rq);
2225 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2228 if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2229 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2231 if (deq_flags & SCX_DEQ_SLEEP)
2232 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2234 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2236 p->scx.flags &= ~SCX_TASK_QUEUED;
2237 rq->scx.nr_running--;
2238 sub_nr_running(rq, 1);
2240 dispatch_dequeue(rq, p);
2244 static void yield_task_scx(struct rq *rq)
2246 struct task_struct *p = rq->curr;
2248 if (SCX_HAS_OP(yield))
2249 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2254 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2256 struct task_struct *from = rq->curr;
2258 if (SCX_HAS_OP(yield))
2259 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2264 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2265 struct scx_dispatch_q *src_dsq,
2268 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2270 /* @dsq is locked and @p is on @dst_rq */
2271 lockdep_assert_held(&src_dsq->lock);
2272 lockdep_assert_rq_held(dst_rq);
2274 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2276 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2277 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2279 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2281 dsq_mod_nr(dst_dsq, 1);
2282 p->scx.dsq = dst_dsq;
2287 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2289 * @enq_flags: %SCX_ENQ_*
2290 * @src_rq: rq to move the task from, locked on entry, released on return
2291 * @dst_rq: rq to move the task into, locked on return
2293 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2295 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2296 struct rq *src_rq, struct rq *dst_rq)
2298 lockdep_assert_rq_held(src_rq);
2300 /* the following marks @p MIGRATING which excludes dequeue */
2301 deactivate_task(src_rq, p, 0);
2302 set_task_cpu(p, cpu_of(dst_rq));
2303 p->scx.sticky_cpu = cpu_of(dst_rq);
2305 raw_spin_rq_unlock(src_rq);
2306 raw_spin_rq_lock(dst_rq);
2309 * We want to pass scx-specific enq_flags but activate_task() will
2310 * truncate the upper 32 bit. As we own @rq, we can pass them through
2311 * @rq->scx.extra_enq_flags instead.
2313 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2314 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2315 dst_rq->scx.extra_enq_flags = enq_flags;
2316 activate_task(dst_rq, p, 0);
2317 dst_rq->scx.extra_enq_flags = 0;
2321 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2324 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2325 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2328 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2329 * must be allowed to finish on the CPU that it's currently on regardless of
2330 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2331 * BPF scheduler shouldn't attempt to migrate a task which has migration
2334 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2335 * no to the BPF scheduler initiated migrations while offline.
2337 * The caller must ensure that @p and @rq are on different CPUs.
2339 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2342 int cpu = cpu_of(rq);
2344 SCHED_WARN_ON(task_cpu(p) == cpu);
2347 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2348 * the pinned CPU in migrate_disable_switch() while @p is being switched
2349 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2350 * updated and thus another CPU may see @p on a DSQ inbetween leading to
2351 * @p passing the below task_allowed_on_cpu() check while migration is
2354 * Test the migration disabled state first as the race window is narrow
2355 * and the BPF scheduler failing to check migration disabled state can
2356 * easily be masked if task_allowed_on_cpu() is done first.
2358 if (unlikely(is_migration_disabled(p))) {
2360 scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2361 p->comm, p->pid, task_cpu(p), cpu);
2366 * We don't require the BPF scheduler to avoid dispatching to offline
2367 * CPUs mostly for convenience but also because CPUs can go offline
2368 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2369 * picked CPU is outside the allowed mask.
2371 if (!task_allowed_on_cpu(p, cpu)) {
2373 scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2374 cpu, p->comm, p->pid);
2378 if (!scx_rq_online(rq))
2385 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2387 * @dsq: locked DSQ @p is currently on
2388 * @src_rq: rq @p is currently on, stable with @dsq locked
2390 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2391 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2392 * required when transferring into a local DSQ. Even when transferring into a
2393 * non-local DSQ, it's better to use the same mechanism to protect against
2394 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2395 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2397 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2398 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2399 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2400 * dancing from our side.
2402 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2403 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2404 * would be cleared to -1. While other cpus may have updated it to different
2405 * values afterwards, as this operation can't be preempted or recurse, the
2406 * holding_cpu can never become this CPU again before we're done. Thus, we can
2407 * tell whether we lost to dequeue by testing whether the holding_cpu still
2408 * points to this CPU. See dispatch_dequeue() for the counterpart.
2410 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2411 * still valid. %false if lost to dequeue.
2413 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2414 struct scx_dispatch_q *dsq,
2417 s32 cpu = raw_smp_processor_id();
2419 lockdep_assert_held(&dsq->lock);
2421 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2422 task_unlink_from_dsq(p, dsq);
2423 p->scx.holding_cpu = cpu;
2425 raw_spin_unlock(&dsq->lock);
2426 raw_spin_rq_lock(src_rq);
2428 /* task_rq couldn't have changed if we're still the holding cpu */
2429 return likely(p->scx.holding_cpu == cpu) &&
2430 !WARN_ON_ONCE(src_rq != task_rq(p));
2433 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2434 struct scx_dispatch_q *dsq, struct rq *src_rq)
2436 raw_spin_rq_unlock(this_rq);
2438 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2439 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2442 raw_spin_rq_unlock(src_rq);
2443 raw_spin_rq_lock(this_rq);
2447 #else /* CONFIG_SMP */
2448 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2449 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2450 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2451 #endif /* CONFIG_SMP */
2454 * move_task_between_dsqs() - Move a task from one DSQ to another
2456 * @enq_flags: %SCX_ENQ_*
2457 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2458 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2460 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2461 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2462 * will change. As @p's task_rq is locked, this function doesn't need to use the
2463 * holding_cpu mechanism.
2465 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2466 * return value, is locked.
2468 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2469 struct scx_dispatch_q *src_dsq,
2470 struct scx_dispatch_q *dst_dsq)
2472 struct rq *src_rq = task_rq(p), *dst_rq;
2474 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2475 lockdep_assert_held(&src_dsq->lock);
2476 lockdep_assert_rq_held(src_rq);
2478 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2479 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2480 if (src_rq != dst_rq &&
2481 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2482 dst_dsq = find_global_dsq(p);
2486 /* no need to migrate if destination is a non-local DSQ */
2491 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2492 * CPU, @p will be migrated.
2494 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2495 /* @p is going from a non-local DSQ to a local DSQ */
2496 if (src_rq == dst_rq) {
2497 task_unlink_from_dsq(p, src_dsq);
2498 move_local_task_to_local_dsq(p, enq_flags,
2500 raw_spin_unlock(&src_dsq->lock);
2502 raw_spin_unlock(&src_dsq->lock);
2503 move_remote_task_to_local_dsq(p, enq_flags,
2508 * @p is going from a non-local DSQ to a non-local DSQ. As
2509 * $src_dsq is already locked, do an abbreviated dequeue.
2511 task_unlink_from_dsq(p, src_dsq);
2513 raw_spin_unlock(&src_dsq->lock);
2515 dispatch_enqueue(dst_dsq, p, enq_flags);
2522 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2523 * banging on the same DSQ on a large NUMA system to the point where switching
2524 * to the bypass mode can take a long time. Inject artificial delays while the
2525 * bypass mode is switching to guarantee timely completion.
2527 static void scx_ops_breather(struct rq *rq)
2531 lockdep_assert_rq_held(rq);
2533 if (likely(!atomic_read(&scx_ops_breather_depth)))
2536 raw_spin_rq_unlock(rq);
2538 until = ktime_get_ns() + NSEC_PER_MSEC;
2542 while (atomic_read(&scx_ops_breather_depth) && --cnt)
2544 } while (atomic_read(&scx_ops_breather_depth) &&
2545 time_before64(ktime_get_ns(), until));
2547 raw_spin_rq_lock(rq);
2550 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2552 struct task_struct *p;
2555 * This retry loop can repeatedly race against scx_ops_bypass()
2556 * dequeueing tasks from @dsq trying to put the system into the bypass
2557 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2558 * live-lock the machine into soft lockups. Give a breather.
2560 scx_ops_breather(rq);
2563 * The caller can't expect to successfully consume a task if the task's
2564 * addition to @dsq isn't guaranteed to be visible somehow. Test
2565 * @dsq->list without locking and skip if it seems empty.
2567 if (list_empty(&dsq->list))
2570 raw_spin_lock(&dsq->lock);
2572 nldsq_for_each_task(p, dsq) {
2573 struct rq *task_rq = task_rq(p);
2575 if (rq == task_rq) {
2576 task_unlink_from_dsq(p, dsq);
2577 move_local_task_to_local_dsq(p, 0, dsq, rq);
2578 raw_spin_unlock(&dsq->lock);
2582 if (task_can_run_on_remote_rq(p, rq, false)) {
2583 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2589 raw_spin_unlock(&dsq->lock);
2593 static bool consume_global_dsq(struct rq *rq)
2595 int node = cpu_to_node(cpu_of(rq));
2597 return consume_dispatch_q(rq, global_dsqs[node]);
2601 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2602 * @rq: current rq which is locked
2603 * @dst_dsq: destination DSQ
2604 * @p: task to dispatch
2605 * @enq_flags: %SCX_ENQ_*
2607 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2608 * DSQ. This function performs all the synchronization dancing needed because
2609 * local DSQs are protected with rq locks.
2611 * The caller must have exclusive ownership of @p (e.g. through
2612 * %SCX_OPSS_DISPATCHING).
2614 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2615 struct task_struct *p, u64 enq_flags)
2617 struct rq *src_rq = task_rq(p);
2618 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2620 struct rq *locked_rq = rq;
2624 * We're synchronized against dequeue through DISPATCHING. As @p can't
2625 * be dequeued, its task_rq and cpus_allowed are stable too.
2627 * If dispatching to @rq that @p is already on, no lock dancing needed.
2629 if (rq == src_rq && rq == dst_rq) {
2630 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2635 if (src_rq != dst_rq &&
2636 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2637 dispatch_enqueue(find_global_dsq(p), p,
2638 enq_flags | SCX_ENQ_CLEAR_OPSS);
2643 * @p is on a possibly remote @src_rq which we need to lock to move the
2644 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2645 * on DISPATCHING, so we can't grab @src_rq lock while holding
2648 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2649 * we're moving from a DSQ and use the same mechanism - mark the task
2650 * under transfer with holding_cpu, release DISPATCHING and then follow
2651 * the same protocol. See unlink_dsq_and_lock_src_rq().
2653 p->scx.holding_cpu = raw_smp_processor_id();
2655 /* store_release ensures that dequeue sees the above */
2656 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2658 /* switch to @src_rq lock */
2659 if (locked_rq != src_rq) {
2660 raw_spin_rq_unlock(locked_rq);
2662 raw_spin_rq_lock(src_rq);
2665 /* task_rq couldn't have changed if we're still the holding cpu */
2666 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2667 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2669 * If @p is staying on the same rq, there's no need to go
2670 * through the full deactivate/activate cycle. Optimize by
2671 * abbreviating move_remote_task_to_local_dsq().
2673 if (src_rq == dst_rq) {
2674 p->scx.holding_cpu = -1;
2675 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2677 move_remote_task_to_local_dsq(p, enq_flags,
2679 /* task has been moved to dst_rq, which is now locked */
2683 /* if the destination CPU is idle, wake it up */
2684 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2685 resched_curr(dst_rq);
2688 /* switch back to @rq lock */
2689 if (locked_rq != rq) {
2690 raw_spin_rq_unlock(locked_rq);
2691 raw_spin_rq_lock(rq);
2693 #else /* CONFIG_SMP */
2694 BUG(); /* control can not reach here on UP */
2695 #endif /* CONFIG_SMP */
2699 * finish_dispatch - Asynchronously finish dispatching a task
2700 * @rq: current rq which is locked
2701 * @p: task to finish dispatching
2702 * @qseq_at_dispatch: qseq when @p started getting dispatched
2703 * @dsq_id: destination DSQ ID
2704 * @enq_flags: %SCX_ENQ_*
2706 * Dispatching to local DSQs may need to wait for queueing to complete or
2707 * require rq lock dancing. As we don't wanna do either while inside
2708 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2709 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2710 * task and its qseq. Once ops.dispatch() returns, this function is called to
2713 * There is no guarantee that @p is still valid for dispatching or even that it
2714 * was valid in the first place. Make sure that the task is still owned by the
2715 * BPF scheduler and claim the ownership before dispatching.
2717 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2718 unsigned long qseq_at_dispatch,
2719 u64 dsq_id, u64 enq_flags)
2721 struct scx_dispatch_q *dsq;
2724 touch_core_sched_dispatch(rq, p);
2727 * No need for _acquire here. @p is accessed only after a successful
2728 * try_cmpxchg to DISPATCHING.
2730 opss = atomic_long_read(&p->scx.ops_state);
2732 switch (opss & SCX_OPSS_STATE_MASK) {
2733 case SCX_OPSS_DISPATCHING:
2735 /* someone else already got to it */
2737 case SCX_OPSS_QUEUED:
2739 * If qseq doesn't match, @p has gone through at least one
2740 * dispatch/dequeue and re-enqueue cycle between
2741 * scx_bpf_dsq_insert() and here and we have no claim on it.
2743 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2747 * While we know @p is accessible, we don't yet have a claim on
2748 * it - the BPF scheduler is allowed to dispatch tasks
2749 * spuriously and there can be a racing dequeue attempt. Let's
2750 * claim @p by atomically transitioning it from QUEUED to
2753 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2754 SCX_OPSS_DISPATCHING)))
2757 case SCX_OPSS_QUEUEING:
2759 * do_enqueue_task() is in the process of transferring the task
2760 * to the BPF scheduler while holding @p's rq lock. As we aren't
2761 * holding any kernel or BPF resource that the enqueue path may
2762 * depend upon, it's safe to wait.
2764 wait_ops_state(p, opss);
2768 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2770 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2772 if (dsq->id == SCX_DSQ_LOCAL)
2773 dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2775 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2778 static void flush_dispatch_buf(struct rq *rq)
2780 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2783 for (u = 0; u < dspc->cursor; u++) {
2784 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2786 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2790 dspc->nr_tasks += dspc->cursor;
2794 static int balance_one(struct rq *rq, struct task_struct *prev)
2796 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2797 bool prev_on_scx = prev->sched_class == &ext_sched_class;
2798 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2799 int nr_loops = SCX_DSP_MAX_LOOPS;
2801 lockdep_assert_rq_held(rq);
2802 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2803 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2805 if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2806 unlikely(rq->scx.cpu_released)) {
2808 * If the previous sched_class for the current CPU was not SCX,
2809 * notify the BPF scheduler that it again has control of the
2810 * core. This callback complements ->cpu_release(), which is
2811 * emitted in switch_class().
2813 if (SCX_HAS_OP(cpu_acquire))
2814 SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2815 rq->scx.cpu_released = false;
2819 update_curr_scx(rq);
2822 * If @prev is runnable & has slice left, it has priority and
2823 * fetching more just increases latency for the fetched tasks.
2824 * Tell pick_task_scx() to keep running @prev. If the BPF
2825 * scheduler wants to handle this explicitly, it should
2826 * implement ->cpu_release().
2828 * See scx_ops_disable_workfn() for the explanation on the
2831 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2832 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2837 /* if there already are tasks to run, nothing to do */
2838 if (rq->scx.local_dsq.nr)
2841 if (consume_global_dsq(rq))
2844 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2850 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2851 * the local DSQ might still end up empty after a successful
2852 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2853 * produced some tasks, retry. The BPF scheduler may depend on this
2854 * looping behavior to simplify its implementation.
2859 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2860 prev_on_scx ? prev : NULL);
2862 flush_dispatch_buf(rq);
2864 if (prev_on_rq && prev->scx.slice) {
2865 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2868 if (rq->scx.local_dsq.nr)
2870 if (consume_global_dsq(rq))
2874 * ops.dispatch() can trap us in this loop by repeatedly
2875 * dispatching ineligible tasks. Break out once in a while to
2876 * allow the watchdog to run. As IRQ can't be enabled in
2877 * balance(), we want to complete this scheduling cycle and then
2878 * start a new one. IOW, we want to call resched_curr() on the
2879 * next, most likely idle, task, not the current one. Use
2880 * scx_bpf_kick_cpu() for deferred kicking.
2882 if (unlikely(!--nr_loops)) {
2883 scx_bpf_kick_cpu(cpu_of(rq), 0);
2886 } while (dspc->nr_tasks);
2890 * Didn't find another task to run. Keep running @prev unless
2891 * %SCX_OPS_ENQ_LAST is in effect.
2893 if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
2894 scx_rq_bypassing(rq))) {
2895 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2898 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2902 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2906 static int balance_scx(struct rq *rq, struct task_struct *prev,
2907 struct rq_flags *rf)
2911 rq_unpin_lock(rq, rf);
2913 ret = balance_one(rq, prev);
2915 #ifdef CONFIG_SCHED_SMT
2917 * When core-sched is enabled, this ops.balance() call will be followed
2918 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2921 if (sched_core_enabled(rq)) {
2922 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2925 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2926 struct rq *srq = cpu_rq(scpu);
2927 struct task_struct *sprev = srq->curr;
2929 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2930 update_rq_clock(srq);
2931 balance_one(srq, sprev);
2935 rq_repin_lock(rq, rf);
2940 static void process_ddsp_deferred_locals(struct rq *rq)
2942 struct task_struct *p;
2944 lockdep_assert_rq_held(rq);
2947 * Now that @rq can be unlocked, execute the deferred enqueueing of
2948 * tasks directly dispatched to the local DSQs of other CPUs. See
2949 * direct_dispatch(). Keep popping from the head instead of using
2950 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2953 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2954 struct task_struct, scx.dsq_list.node))) {
2955 struct scx_dispatch_q *dsq;
2957 list_del_init(&p->scx.dsq_list.node);
2959 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2960 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2961 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2965 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2967 if (p->scx.flags & SCX_TASK_QUEUED) {
2969 * Core-sched might decide to execute @p before it is
2970 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2972 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2973 dispatch_dequeue(rq, p);
2976 p->se.exec_start = rq_clock_task(rq);
2978 /* see dequeue_task_scx() on why we skip when !QUEUED */
2979 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2980 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2982 clr_task_runnable(p, true);
2985 * @p is getting newly scheduled or got kicked after someone updated its
2986 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2988 if ((p->scx.slice == SCX_SLICE_INF) !=
2989 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2990 if (p->scx.slice == SCX_SLICE_INF)
2991 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2993 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2995 sched_update_tick_dependency(rq);
2998 * For now, let's refresh the load_avgs just when transitioning
2999 * in and out of nohz. In the future, we might want to add a
3000 * mechanism which calls the following periodically on
3001 * tick-stopped CPUs.
3003 update_other_load_avgs(rq);
3007 static enum scx_cpu_preempt_reason
3008 preempt_reason_from_class(const struct sched_class *class)
3011 if (class == &stop_sched_class)
3012 return SCX_CPU_PREEMPT_STOP;
3014 if (class == &dl_sched_class)
3015 return SCX_CPU_PREEMPT_DL;
3016 if (class == &rt_sched_class)
3017 return SCX_CPU_PREEMPT_RT;
3018 return SCX_CPU_PREEMPT_UNKNOWN;
3021 static void switch_class(struct rq *rq, struct task_struct *next)
3023 const struct sched_class *next_class = next->sched_class;
3027 * Pairs with the smp_load_acquire() issued by a CPU in
3028 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3031 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3033 if (!static_branch_unlikely(&scx_ops_cpu_preempt))
3037 * The callback is conceptually meant to convey that the CPU is no
3038 * longer under the control of SCX. Therefore, don't invoke the callback
3039 * if the next class is below SCX (in which case the BPF scheduler has
3040 * actively decided not to schedule any tasks on the CPU).
3042 if (sched_class_above(&ext_sched_class, next_class))
3046 * At this point we know that SCX was preempted by a higher priority
3047 * sched_class, so invoke the ->cpu_release() callback if we have not
3048 * done so already. We only send the callback once between SCX being
3049 * preempted, and it regaining control of the CPU.
3051 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3052 * next time that balance_scx() is invoked.
3054 if (!rq->scx.cpu_released) {
3055 if (SCX_HAS_OP(cpu_release)) {
3056 struct scx_cpu_release_args args = {
3057 .reason = preempt_reason_from_class(next_class),
3061 SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3062 cpu_release, cpu_of(rq), &args);
3064 rq->scx.cpu_released = true;
3068 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3069 struct task_struct *next)
3071 update_curr_scx(rq);
3073 /* see dequeue_task_scx() on why we skip when !QUEUED */
3074 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3075 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3077 if (p->scx.flags & SCX_TASK_QUEUED) {
3078 set_task_runnable(rq, p);
3081 * If @p has slice left and is being put, @p is getting
3082 * preempted by a higher priority scheduler class or core-sched
3083 * forcing a different task. Leave it at the head of the local
3086 if (p->scx.slice && !scx_rq_bypassing(rq)) {
3087 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3092 * If @p is runnable but we're about to enter a lower
3093 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3094 * ops.enqueue() that @p is the only one available for this cpu,
3095 * which should trigger an explicit follow-up scheduling event.
3097 if (sched_class_above(&ext_sched_class, next->sched_class)) {
3098 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3099 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3101 do_enqueue_task(rq, p, 0, -1);
3106 if (next && next->sched_class != &ext_sched_class)
3107 switch_class(rq, next);
3110 static struct task_struct *first_local_task(struct rq *rq)
3112 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3113 struct task_struct, scx.dsq_list.node);
3116 static struct task_struct *pick_task_scx(struct rq *rq)
3118 struct task_struct *prev = rq->curr;
3119 struct task_struct *p;
3120 bool prev_on_scx = prev->sched_class == &ext_sched_class;
3121 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3122 bool kick_idle = false;
3127 * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just
3128 * have gone through balance_scx(). Unfortunately, there currently is a
3129 * bug where fair could say yes on balance() but no on pick_task(),
3130 * which then ends up calling pick_task_scx() without preceding
3133 * Keep running @prev if possible and avoid stalling from entering idle
3134 * without balancing.
3136 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3137 * if pick_task_scx() is called without preceding balance_scx().
3139 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3146 } else if (unlikely(keep_prev && !prev_on_scx)) {
3147 /* only allowed during transitions */
3148 WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3153 * If balance_scx() is telling us to keep running @prev, replenish slice
3154 * if necessary and keep running @prev. Otherwise, pop the first one
3155 * from the local DSQ.
3160 p->scx.slice = SCX_SLICE_DFL;
3162 p = first_local_task(rq);
3165 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3169 if (unlikely(!p->scx.slice)) {
3170 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3171 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3172 p->comm, p->pid, __func__);
3173 scx_warned_zero_slice = true;
3175 p->scx.slice = SCX_SLICE_DFL;
3182 #ifdef CONFIG_SCHED_CORE
3184 * scx_prio_less - Task ordering for core-sched
3187 * @in_fi: in forced idle state
3189 * Core-sched is implemented as an additional scheduling layer on top of the
3190 * usual sched_class'es and needs to find out the expected task ordering. For
3191 * SCX, core-sched calls this function to interrogate the task ordering.
3193 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3194 * to implement the default task ordering. The older the timestamp, the higher
3195 * priority the task - the global FIFO ordering matching the default scheduling
3198 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3199 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3201 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3205 * The const qualifiers are dropped from task_struct pointers when
3206 * calling ops.core_sched_before(). Accesses are controlled by the
3209 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3210 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3211 (struct task_struct *)a,
3212 (struct task_struct *)b);
3214 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3216 #endif /* CONFIG_SCHED_CORE */
3220 static bool test_and_clear_cpu_idle(int cpu)
3222 #ifdef CONFIG_SCHED_SMT
3224 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3225 * cluster is not wholly idle either way. This also prevents
3226 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3228 if (sched_smt_active()) {
3229 const struct cpumask *smt = cpu_smt_mask(cpu);
3232 * If offline, @cpu is not its own sibling and
3233 * scx_pick_idle_cpu() can get caught in an infinite loop as
3234 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3235 * is eventually cleared.
3237 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
3238 * reduce memory writes, which may help alleviate cache
3239 * coherence pressure.
3241 if (cpumask_intersects(smt, idle_masks.smt))
3242 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3243 else if (cpumask_test_cpu(cpu, idle_masks.smt))
3244 __cpumask_clear_cpu(cpu, idle_masks.smt);
3247 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3250 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3255 if (sched_smt_active()) {
3256 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3257 if (cpu < nr_cpu_ids)
3260 if (flags & SCX_PICK_IDLE_CORE)
3264 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3265 if (cpu >= nr_cpu_ids)
3269 if (test_and_clear_cpu_idle(cpu))
3276 * Return the amount of CPUs in the same LLC domain of @cpu (or zero if the LLC
3277 * domain is not defined).
3279 static unsigned int llc_weight(s32 cpu)
3281 struct sched_domain *sd;
3283 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3287 return sd->span_weight;
3291 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
3292 * domain is not defined).
3294 static struct cpumask *llc_span(s32 cpu)
3296 struct sched_domain *sd;
3298 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3302 return sched_domain_span(sd);
3306 * Return the amount of CPUs in the same NUMA domain of @cpu (or zero if the
3307 * NUMA domain is not defined).
3309 static unsigned int numa_weight(s32 cpu)
3311 struct sched_domain *sd;
3312 struct sched_group *sg;
3314 sd = rcu_dereference(per_cpu(sd_numa, cpu));
3321 return sg->group_weight;
3325 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
3326 * domain is not defined).
3328 static struct cpumask *numa_span(s32 cpu)
3330 struct sched_domain *sd;
3331 struct sched_group *sg;
3333 sd = rcu_dereference(per_cpu(sd_numa, cpu));
3340 return sched_group_span(sg);
3344 * Return true if the LLC domains do not perfectly overlap with the NUMA
3345 * domains, false otherwise.
3347 static bool llc_numa_mismatch(void)
3352 * We need to scan all online CPUs to verify whether their scheduling
3355 * While it is rare to encounter architectures with asymmetric NUMA
3356 * topologies, CPU hotplugging or virtualized environments can result
3357 * in asymmetric configurations.
3362 * - LLC 0: cpu0..cpu7
3363 * - LLC 1: cpu8..cpu15 [offline]
3366 * - LLC 0: cpu16..cpu23
3367 * - LLC 1: cpu24..cpu31
3369 * In this case, if we only check the first online CPU (cpu0), we might
3370 * incorrectly assume that the LLC and NUMA domains are fully
3371 * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
3374 for_each_online_cpu(cpu)
3375 if (llc_weight(cpu) != numa_weight(cpu))
3382 * Initialize topology-aware scheduling.
3384 * Detect if the system has multiple LLC or multiple NUMA domains and enable
3385 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
3388 * Assumption: the kernel's internal topology representation assumes that each
3389 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
3390 * contained within a single NUMA node.
3392 static void update_selcpu_topology(void)
3394 bool enable_llc = false, enable_numa = false;
3395 unsigned int nr_cpus;
3396 s32 cpu = cpumask_first(cpu_online_mask);
3399 * Enable LLC domain optimization only when there are multiple LLC
3400 * domains among the online CPUs. If all online CPUs are part of a
3401 * single LLC domain, the idle CPU selection logic can choose any
3402 * online CPU without bias.
3404 * Note that it is sufficient to check the LLC domain of the first
3405 * online CPU to determine whether a single LLC domain includes all
3409 nr_cpus = llc_weight(cpu);
3411 if (nr_cpus < num_online_cpus())
3413 pr_debug("sched_ext: LLC=%*pb weight=%u\n",
3414 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
3418 * Enable NUMA optimization only when there are multiple NUMA domains
3419 * among the online CPUs and the NUMA domains don't perfectly overlaps
3420 * with the LLC domains.
3422 * If all CPUs belong to the same NUMA node and the same LLC domain,
3423 * enabling both NUMA and LLC optimizations is unnecessary, as checking
3424 * for an idle CPU in the same domain twice is redundant.
3426 nr_cpus = numa_weight(cpu);
3428 if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
3430 pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
3431 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
3435 pr_debug("sched_ext: LLC idle selection %s\n",
3436 str_enabled_disabled(enable_llc));
3437 pr_debug("sched_ext: NUMA idle selection %s\n",
3438 str_enabled_disabled(enable_numa));
3441 static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
3443 static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
3445 static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
3447 static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
3451 * Built-in CPU idle selection policy:
3453 * 1. Prioritize full-idle cores:
3454 * - always prioritize CPUs from fully idle cores (both logical CPUs are
3455 * idle) to avoid interference caused by SMT.
3457 * 2. Reuse the same CPU:
3458 * - prefer the last used CPU to take advantage of cached data (L1, L2) and
3459 * branch prediction optimizations.
3461 * 3. Pick a CPU within the same LLC (Last-Level Cache):
3462 * - if the above conditions aren't met, pick a CPU that shares the same LLC
3463 * to maintain cache locality.
3465 * 4. Pick a CPU within the same NUMA node, if enabled:
3466 * - choose a CPU from the same NUMA node to reduce memory access latency.
3468 * 5. Pick any idle CPU usable by the task.
3470 * Step 3 and 4 are performed only if the system has, respectively, multiple
3471 * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
3472 * scx_selcpu_topo_numa).
3474 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
3475 * we never call ops.select_cpu() for them, see select_task_rq().
3477 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3478 u64 wake_flags, bool *found)
3480 const struct cpumask *llc_cpus = NULL;
3481 const struct cpumask *numa_cpus = NULL;
3487 * This is necessary to protect llc_cpus.
3492 * Determine the scheduling domain only if the task is allowed to run
3495 * This is done primarily for efficiency, as it avoids the overhead of
3496 * updating a cpumask every time we need to select an idle CPU (which
3497 * can be costly in large SMP systems), but it also aligns logically:
3498 * if a task's scheduling domain is restricted by user-space (through
3499 * CPU affinity), the task will simply use the flat scheduling domain
3500 * defined by user-space.
3502 if (p->nr_cpus_allowed >= num_possible_cpus()) {
3503 if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
3504 numa_cpus = numa_span(prev_cpu);
3506 if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
3507 llc_cpus = llc_span(prev_cpu);
3511 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
3513 if (wake_flags & SCX_WAKE_SYNC) {
3514 cpu = smp_processor_id();
3517 * If the waker's CPU is cache affine and prev_cpu is idle,
3518 * then avoid a migration.
3520 if (cpus_share_cache(cpu, prev_cpu) &&
3521 test_and_clear_cpu_idle(prev_cpu)) {
3527 * If the waker's local DSQ is empty, and the system is under
3528 * utilized, try to wake up @p to the local DSQ of the waker.
3530 * Checking only for an empty local DSQ is insufficient as it
3531 * could give the wakee an unfair advantage when the system is
3534 * Checking only for the presence of idle CPUs is also
3535 * insufficient as the local DSQ of the waker could have tasks
3536 * piled up on it even if there is an idle core elsewhere on
3539 if (!cpumask_empty(idle_masks.cpu) &&
3540 !(current->flags & PF_EXITING) &&
3541 cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3542 if (cpumask_test_cpu(cpu, p->cpus_ptr))
3548 * If CPU has SMT, any wholly idle CPU is likely a better pick than
3549 * partially idle @prev_cpu.
3551 if (sched_smt_active()) {
3553 * Keep using @prev_cpu if it's part of a fully idle core.
3555 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3556 test_and_clear_cpu_idle(prev_cpu)) {
3562 * Search for any fully idle core in the same LLC domain.
3565 cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
3571 * Search for any fully idle core in the same NUMA node.
3574 cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
3580 * Search for any full idle core usable by the task.
3582 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3588 * Use @prev_cpu if it's idle.
3590 if (test_and_clear_cpu_idle(prev_cpu)) {
3596 * Search for any idle CPU in the same LLC domain.
3599 cpu = scx_pick_idle_cpu(llc_cpus, 0);
3605 * Search for any idle CPU in the same NUMA node.
3608 cpu = scx_pick_idle_cpu(numa_cpus, 0);
3614 * Search for any idle CPU usable by the task.
3616 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3630 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3633 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3634 * can be a good migration opportunity with low cache and memory
3635 * footprint. Returning a CPU different than @prev_cpu triggers
3636 * immediate rq migration. However, for SCX, as the current rq
3637 * association doesn't dictate where the task is going to run, this
3638 * doesn't fit well. If necessary, we can later add a dedicated method
3639 * which can decide to preempt self to force it through the regular
3642 if (unlikely(wake_flags & WF_EXEC))
3645 if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3647 struct task_struct **ddsp_taskp;
3649 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3650 WARN_ON_ONCE(*ddsp_taskp);
3653 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3654 select_cpu, p, prev_cpu, wake_flags);
3656 if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3664 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3666 p->scx.slice = SCX_SLICE_DFL;
3667 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3673 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3678 static void set_cpus_allowed_scx(struct task_struct *p,
3679 struct affinity_context *ac)
3681 set_cpus_allowed_common(p, ac);
3684 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3685 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3686 * scheduler the effective one.
3688 * Fine-grained memory write control is enforced by BPF making the const
3689 * designation pointless. Cast it away when calling the operation.
3691 if (SCX_HAS_OP(set_cpumask))
3692 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3693 (struct cpumask *)p->cpus_ptr);
3696 static void reset_idle_masks(void)
3699 * Consider all online cpus idle. Should converge to the actual state
3702 cpumask_copy(idle_masks.cpu, cpu_online_mask);
3703 cpumask_copy(idle_masks.smt, cpu_online_mask);
3706 static void update_builtin_idle(int cpu, bool idle)
3708 assign_cpu(cpu, idle_masks.cpu, idle);
3710 #ifdef CONFIG_SCHED_SMT
3711 if (sched_smt_active()) {
3712 const struct cpumask *smt = cpu_smt_mask(cpu);
3716 * idle_masks.smt handling is racy but that's fine as
3717 * it's only for optimization and self-correcting.
3719 if (!cpumask_subset(smt, idle_masks.cpu))
3721 cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3723 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3730 * Update the idle state of a CPU to @idle.
3732 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
3733 * scheduler of an actual idle state transition (idle to busy or vice
3734 * versa). If @do_notify is false, only the idle state in the idle masks is
3735 * refreshed without invoking ops.update_idle().
3737 * This distinction is necessary, because an idle CPU can be "reserved" and
3738 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
3739 * busy even if no tasks are dispatched. In this case, the CPU may return
3740 * to idle without a true state transition. Refreshing the idle masks
3741 * without invoking ops.update_idle() ensures accurate idle state tracking
3742 * while avoiding unnecessary updates and maintaining balanced state
3745 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
3747 int cpu = cpu_of(rq);
3749 lockdep_assert_rq_held(rq);
3752 * Trigger ops.update_idle() only when transitioning from a task to
3753 * the idle thread and vice versa.
3755 * Idle transitions are indicated by do_notify being set to true,
3756 * managed by put_prev_task_idle()/set_next_task_idle().
3758 if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
3759 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3762 * Update the idle masks:
3763 * - for real idle transitions (do_notify == true)
3764 * - for idle-to-idle transitions (indicated by the previous task
3765 * being the idle thread, managed by pick_task_idle())
3767 * Skip updating idle masks if the previous task is not the idle
3768 * thread, since set_next_task_idle() has already handled it when
3769 * transitioning from a task to the idle thread (calling this
3770 * function with do_notify == true).
3772 * In this way we can avoid updating the idle masks twice,
3775 if (static_branch_likely(&scx_builtin_idle_enabled))
3776 if (do_notify || is_idle_task(rq->curr))
3777 update_builtin_idle(cpu, idle);
3780 static void handle_hotplug(struct rq *rq, bool online)
3782 int cpu = cpu_of(rq);
3784 atomic_long_inc(&scx_hotplug_seq);
3787 update_selcpu_topology();
3789 if (online && SCX_HAS_OP(cpu_online))
3790 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3791 else if (!online && SCX_HAS_OP(cpu_offline))
3792 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3794 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3795 "cpu %d going %s, exiting scheduler", cpu,
3796 online ? "online" : "offline");
3799 void scx_rq_activate(struct rq *rq)
3801 handle_hotplug(rq, true);
3804 void scx_rq_deactivate(struct rq *rq)
3806 handle_hotplug(rq, false);
3809 static void rq_online_scx(struct rq *rq)
3811 rq->scx.flags |= SCX_RQ_ONLINE;
3814 static void rq_offline_scx(struct rq *rq)
3816 rq->scx.flags &= ~SCX_RQ_ONLINE;
3819 #else /* CONFIG_SMP */
3821 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3822 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3823 static void reset_idle_masks(void) {}
3825 #endif /* CONFIG_SMP */
3827 static bool check_rq_for_timeouts(struct rq *rq)
3829 struct task_struct *p;
3831 bool timed_out = false;
3833 rq_lock_irqsave(rq, &rf);
3834 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3835 unsigned long last_runnable = p->scx.runnable_at;
3837 if (unlikely(time_after(jiffies,
3838 last_runnable + scx_watchdog_timeout))) {
3839 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3841 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3842 "%s[%d] failed to run for %u.%03us",
3844 dur_ms / 1000, dur_ms % 1000);
3849 rq_unlock_irqrestore(rq, &rf);
3854 static void scx_watchdog_workfn(struct work_struct *work)
3858 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3860 for_each_online_cpu(cpu) {
3861 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3866 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3867 scx_watchdog_timeout / 2);
3870 void scx_tick(struct rq *rq)
3872 unsigned long last_check;
3877 last_check = READ_ONCE(scx_watchdog_timestamp);
3878 if (unlikely(time_after(jiffies,
3879 last_check + READ_ONCE(scx_watchdog_timeout)))) {
3880 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3882 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3883 "watchdog failed to check in for %u.%03us",
3884 dur_ms / 1000, dur_ms % 1000);
3887 update_other_load_avgs(rq);
3890 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3892 update_curr_scx(rq);
3895 * While disabling, always resched and refresh core-sched timestamp as
3896 * we can't trust the slice management or ops.core_sched_before().
3898 if (scx_rq_bypassing(rq)) {
3899 curr->scx.slice = 0;
3900 touch_core_sched(rq, curr);
3901 } else if (SCX_HAS_OP(tick)) {
3902 SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
3905 if (!curr->scx.slice)
3909 #ifdef CONFIG_EXT_GROUP_SCHED
3910 static struct cgroup *tg_cgrp(struct task_group *tg)
3913 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3914 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3917 if (tg && tg->css.cgroup)
3918 return tg->css.cgroup;
3920 return &cgrp_dfl_root.cgrp;
3923 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3925 #else /* CONFIG_EXT_GROUP_SCHED */
3927 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3929 #endif /* CONFIG_EXT_GROUP_SCHED */
3931 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3933 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3936 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3938 enum scx_task_state prev_state = scx_get_task_state(p);
3941 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3947 warn = prev_state != SCX_TASK_NONE;
3949 case SCX_TASK_READY:
3950 warn = prev_state == SCX_TASK_NONE;
3952 case SCX_TASK_ENABLED:
3953 warn = prev_state != SCX_TASK_READY;
3960 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3961 prev_state, state, p->comm, p->pid);
3963 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3964 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3967 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3971 p->scx.disallow = false;
3973 if (SCX_HAS_OP(init_task)) {
3974 struct scx_init_task_args args = {
3975 SCX_INIT_TASK_ARGS_CGROUP(tg)
3979 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3980 if (unlikely(ret)) {
3981 ret = ops_sanitize_err("init_task", ret);
3986 scx_set_task_state(p, SCX_TASK_INIT);
3988 if (p->scx.disallow) {
3993 rq = task_rq_lock(p, &rf);
3996 * We're in the load path and @p->policy will be applied
3997 * right after. Reverting @p->policy here and rejecting
3998 * %SCHED_EXT transitions from scx_check_setscheduler()
3999 * guarantees that if ops.init_task() sets @p->disallow,
4000 * @p can never be in SCX.
4002 if (p->policy == SCHED_EXT) {
4003 p->policy = SCHED_NORMAL;
4004 atomic_long_inc(&scx_nr_rejected);
4007 task_rq_unlock(rq, p, &rf);
4008 } else if (p->policy == SCHED_EXT) {
4009 scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
4014 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
4018 static void scx_ops_enable_task(struct task_struct *p)
4022 lockdep_assert_rq_held(task_rq(p));
4025 * Set the weight before calling ops.enable() so that the scheduler
4026 * doesn't see a stale value if they inspect the task struct.
4028 if (task_has_idle_policy(p))
4029 weight = WEIGHT_IDLEPRIO;
4031 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
4033 p->scx.weight = sched_weight_to_cgroup(weight);
4035 if (SCX_HAS_OP(enable))
4036 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
4037 scx_set_task_state(p, SCX_TASK_ENABLED);
4039 if (SCX_HAS_OP(set_weight))
4040 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4043 static void scx_ops_disable_task(struct task_struct *p)
4045 lockdep_assert_rq_held(task_rq(p));
4046 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
4048 if (SCX_HAS_OP(disable))
4049 SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
4050 scx_set_task_state(p, SCX_TASK_READY);
4053 static void scx_ops_exit_task(struct task_struct *p)
4055 struct scx_exit_task_args args = {
4059 lockdep_assert_rq_held(task_rq(p));
4061 switch (scx_get_task_state(p)) {
4065 args.cancelled = true;
4067 case SCX_TASK_READY:
4069 case SCX_TASK_ENABLED:
4070 scx_ops_disable_task(p);
4077 if (SCX_HAS_OP(exit_task))
4078 SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
4079 scx_set_task_state(p, SCX_TASK_NONE);
4082 void init_scx_entity(struct sched_ext_entity *scx)
4084 memset(scx, 0, sizeof(*scx));
4085 INIT_LIST_HEAD(&scx->dsq_list.node);
4086 RB_CLEAR_NODE(&scx->dsq_priq);
4087 scx->sticky_cpu = -1;
4088 scx->holding_cpu = -1;
4089 INIT_LIST_HEAD(&scx->runnable_node);
4090 scx->runnable_at = jiffies;
4091 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
4092 scx->slice = SCX_SLICE_DFL;
4095 void scx_pre_fork(struct task_struct *p)
4098 * BPF scheduler enable/disable paths want to be able to iterate and
4099 * update all tasks which can become complex when racing forks. As
4100 * enable/disable are very cold paths, let's use a percpu_rwsem to
4103 percpu_down_read(&scx_fork_rwsem);
4106 int scx_fork(struct task_struct *p)
4108 percpu_rwsem_assert_held(&scx_fork_rwsem);
4110 if (scx_ops_init_task_enabled)
4111 return scx_ops_init_task(p, task_group(p), true);
4116 void scx_post_fork(struct task_struct *p)
4118 if (scx_ops_init_task_enabled) {
4119 scx_set_task_state(p, SCX_TASK_READY);
4122 * Enable the task immediately if it's running on sched_ext.
4123 * Otherwise, it'll be enabled in switching_to_scx() if and
4124 * when it's ever configured to run with a SCHED_EXT policy.
4126 if (p->sched_class == &ext_sched_class) {
4130 rq = task_rq_lock(p, &rf);
4131 scx_ops_enable_task(p);
4132 task_rq_unlock(rq, p, &rf);
4136 spin_lock_irq(&scx_tasks_lock);
4137 list_add_tail(&p->scx.tasks_node, &scx_tasks);
4138 spin_unlock_irq(&scx_tasks_lock);
4140 percpu_up_read(&scx_fork_rwsem);
4143 void scx_cancel_fork(struct task_struct *p)
4145 if (scx_enabled()) {
4149 rq = task_rq_lock(p, &rf);
4150 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
4151 scx_ops_exit_task(p);
4152 task_rq_unlock(rq, p, &rf);
4155 percpu_up_read(&scx_fork_rwsem);
4158 void sched_ext_free(struct task_struct *p)
4160 unsigned long flags;
4162 spin_lock_irqsave(&scx_tasks_lock, flags);
4163 list_del_init(&p->scx.tasks_node);
4164 spin_unlock_irqrestore(&scx_tasks_lock, flags);
4167 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
4168 * ENABLED transitions can't race us. Disable ops for @p.
4170 if (scx_get_task_state(p) != SCX_TASK_NONE) {
4174 rq = task_rq_lock(p, &rf);
4175 scx_ops_exit_task(p);
4176 task_rq_unlock(rq, p, &rf);
4180 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4181 const struct load_weight *lw)
4183 lockdep_assert_rq_held(task_rq(p));
4185 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4186 if (SCX_HAS_OP(set_weight))
4187 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4190 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4194 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4196 scx_ops_enable_task(p);
4199 * set_cpus_allowed_scx() is not called while @p is associated with a
4200 * different scheduler class. Keep the BPF scheduler up-to-date.
4202 if (SCX_HAS_OP(set_cpumask))
4203 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
4204 (struct cpumask *)p->cpus_ptr);
4207 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4209 scx_ops_disable_task(p);
4212 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {}
4213 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4215 int scx_check_setscheduler(struct task_struct *p, int policy)
4217 lockdep_assert_rq_held(task_rq(p));
4219 /* if disallow, reject transitioning into SCX */
4220 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4221 p->policy != policy && policy == SCHED_EXT)
4227 #ifdef CONFIG_NO_HZ_FULL
4228 bool scx_can_stop_tick(struct rq *rq)
4230 struct task_struct *p = rq->curr;
4232 if (scx_rq_bypassing(rq))
4235 if (p->sched_class != &ext_sched_class)
4239 * @rq can dispatch from different DSQs, so we can't tell whether it
4240 * needs the tick or not by looking at nr_running. Allow stopping ticks
4241 * iff the BPF scheduler indicated so. See set_next_task_scx().
4243 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4247 #ifdef CONFIG_EXT_GROUP_SCHED
4249 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4250 static bool scx_cgroup_enabled;
4251 static bool cgroup_warned_missing_weight;
4252 static bool cgroup_warned_missing_idle;
4254 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
4256 if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
4257 cgroup_warned_missing_weight)
4260 if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
4263 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
4265 cgroup_warned_missing_weight = true;
4268 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
4270 if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
4276 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
4278 cgroup_warned_missing_idle = true;
4281 int scx_tg_online(struct task_group *tg)
4285 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4287 percpu_down_read(&scx_cgroup_rwsem);
4289 scx_cgroup_warn_missing_weight(tg);
4291 if (scx_cgroup_enabled) {
4292 if (SCX_HAS_OP(cgroup_init)) {
4293 struct scx_cgroup_init_args args =
4294 { .weight = tg->scx_weight };
4296 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4297 tg->css.cgroup, &args);
4299 ret = ops_sanitize_err("cgroup_init", ret);
4302 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4304 tg->scx_flags |= SCX_TG_ONLINE;
4307 percpu_up_read(&scx_cgroup_rwsem);
4311 void scx_tg_offline(struct task_group *tg)
4313 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4315 percpu_down_read(&scx_cgroup_rwsem);
4317 if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
4318 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
4319 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4321 percpu_up_read(&scx_cgroup_rwsem);
4324 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4326 struct cgroup_subsys_state *css;
4327 struct task_struct *p;
4330 /* released in scx_finish/cancel_attach() */
4331 percpu_down_read(&scx_cgroup_rwsem);
4333 if (!scx_cgroup_enabled)
4336 cgroup_taskset_for_each(p, css, tset) {
4337 struct cgroup *from = tg_cgrp(task_group(p));
4338 struct cgroup *to = tg_cgrp(css_tg(css));
4340 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4343 * sched_move_task() omits identity migrations. Let's match the
4344 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4345 * always match one-to-one.
4350 if (SCX_HAS_OP(cgroup_prep_move)) {
4351 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4352 p, from, css->cgroup);
4357 p->scx.cgrp_moving_from = from;
4363 cgroup_taskset_for_each(p, css, tset) {
4364 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4365 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4366 p->scx.cgrp_moving_from, css->cgroup);
4367 p->scx.cgrp_moving_from = NULL;
4370 percpu_up_read(&scx_cgroup_rwsem);
4371 return ops_sanitize_err("cgroup_prep_move", ret);
4374 void scx_cgroup_move_task(struct task_struct *p)
4376 if (!scx_cgroup_enabled)
4380 * @p must have ops.cgroup_prep_move() called on it and thus
4381 * cgrp_moving_from set.
4383 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4384 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4385 p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4386 p->scx.cgrp_moving_from = NULL;
4389 void scx_cgroup_finish_attach(void)
4391 percpu_up_read(&scx_cgroup_rwsem);
4394 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4396 struct cgroup_subsys_state *css;
4397 struct task_struct *p;
4399 if (!scx_cgroup_enabled)
4402 cgroup_taskset_for_each(p, css, tset) {
4403 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4404 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4405 p->scx.cgrp_moving_from, css->cgroup);
4406 p->scx.cgrp_moving_from = NULL;
4409 percpu_up_read(&scx_cgroup_rwsem);
4412 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4414 percpu_down_read(&scx_cgroup_rwsem);
4416 if (scx_cgroup_enabled && tg->scx_weight != weight) {
4417 if (SCX_HAS_OP(cgroup_set_weight))
4418 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4419 tg_cgrp(tg), weight);
4420 tg->scx_weight = weight;
4423 percpu_up_read(&scx_cgroup_rwsem);
4426 void scx_group_set_idle(struct task_group *tg, bool idle)
4428 percpu_down_read(&scx_cgroup_rwsem);
4429 scx_cgroup_warn_missing_idle(tg);
4430 percpu_up_read(&scx_cgroup_rwsem);
4433 static void scx_cgroup_lock(void)
4435 percpu_down_write(&scx_cgroup_rwsem);
4438 static void scx_cgroup_unlock(void)
4440 percpu_up_write(&scx_cgroup_rwsem);
4443 #else /* CONFIG_EXT_GROUP_SCHED */
4445 static inline void scx_cgroup_lock(void) {}
4446 static inline void scx_cgroup_unlock(void) {}
4448 #endif /* CONFIG_EXT_GROUP_SCHED */
4451 * Omitted operations:
4453 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4454 * isn't tied to the CPU at that point. Preemption is implemented by resetting
4455 * the victim task's slice to 0 and triggering reschedule on the target CPU.
4457 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4459 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4460 * their current sched_class. Call them directly from sched core instead.
4462 DEFINE_SCHED_CLASS(ext) = {
4463 .enqueue_task = enqueue_task_scx,
4464 .dequeue_task = dequeue_task_scx,
4465 .yield_task = yield_task_scx,
4466 .yield_to_task = yield_to_task_scx,
4468 .wakeup_preempt = wakeup_preempt_scx,
4470 .balance = balance_scx,
4471 .pick_task = pick_task_scx,
4473 .put_prev_task = put_prev_task_scx,
4474 .set_next_task = set_next_task_scx,
4477 .select_task_rq = select_task_rq_scx,
4478 .task_woken = task_woken_scx,
4479 .set_cpus_allowed = set_cpus_allowed_scx,
4481 .rq_online = rq_online_scx,
4482 .rq_offline = rq_offline_scx,
4485 .task_tick = task_tick_scx,
4487 .switching_to = switching_to_scx,
4488 .switched_from = switched_from_scx,
4489 .switched_to = switched_to_scx,
4490 .reweight_task = reweight_task_scx,
4491 .prio_changed = prio_changed_scx,
4493 .update_curr = update_curr_scx,
4495 #ifdef CONFIG_UCLAMP_TASK
4496 .uclamp_enabled = 1,
4500 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4502 memset(dsq, 0, sizeof(*dsq));
4504 raw_spin_lock_init(&dsq->lock);
4505 INIT_LIST_HEAD(&dsq->list);
4509 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4511 struct scx_dispatch_q *dsq;
4514 if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4515 return ERR_PTR(-EINVAL);
4517 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4519 return ERR_PTR(-ENOMEM);
4521 init_dsq(dsq, dsq_id);
4523 ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4527 return ERR_PTR(ret);
4532 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4534 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4535 struct scx_dispatch_q *dsq, *tmp_dsq;
4537 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4538 kfree_rcu(dsq, rcu);
4541 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4543 static void destroy_dsq(u64 dsq_id)
4545 struct scx_dispatch_q *dsq;
4546 unsigned long flags;
4550 dsq = find_user_dsq(dsq_id);
4552 goto out_unlock_rcu;
4554 raw_spin_lock_irqsave(&dsq->lock, flags);
4557 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4559 goto out_unlock_dsq;
4562 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4563 goto out_unlock_dsq;
4566 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4567 * queueing more tasks. As this function can be called from anywhere,
4568 * freeing is bounced through an irq work to avoid nesting RCU
4569 * operations inside scheduler locks.
4571 dsq->id = SCX_DSQ_INVALID;
4572 llist_add(&dsq->free_node, &dsqs_to_free);
4573 irq_work_queue(&free_dsq_irq_work);
4576 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4581 #ifdef CONFIG_EXT_GROUP_SCHED
4582 static void scx_cgroup_exit(void)
4584 struct cgroup_subsys_state *css;
4586 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4588 scx_cgroup_enabled = false;
4591 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4592 * cgroups and exit all the inited ones, all online cgroups are exited.
4595 css_for_each_descendant_post(css, &root_task_group.css) {
4596 struct task_group *tg = css_tg(css);
4598 if (!(tg->scx_flags & SCX_TG_INITED))
4600 tg->scx_flags &= ~SCX_TG_INITED;
4602 if (!scx_ops.cgroup_exit)
4605 if (WARN_ON_ONCE(!css_tryget(css)))
4609 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4617 static int scx_cgroup_init(void)
4619 struct cgroup_subsys_state *css;
4622 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4624 cgroup_warned_missing_weight = false;
4625 cgroup_warned_missing_idle = false;
4628 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4629 * cgroups and init, all online cgroups are initialized.
4632 css_for_each_descendant_pre(css, &root_task_group.css) {
4633 struct task_group *tg = css_tg(css);
4634 struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4636 scx_cgroup_warn_missing_weight(tg);
4637 scx_cgroup_warn_missing_idle(tg);
4639 if ((tg->scx_flags &
4640 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4643 if (!scx_ops.cgroup_init) {
4644 tg->scx_flags |= SCX_TG_INITED;
4648 if (WARN_ON_ONCE(!css_tryget(css)))
4652 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4653 css->cgroup, &args);
4656 scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4659 tg->scx_flags |= SCX_TG_INITED;
4666 WARN_ON_ONCE(scx_cgroup_enabled);
4667 scx_cgroup_enabled = true;
4673 static void scx_cgroup_exit(void) {}
4674 static int scx_cgroup_init(void) { return 0; }
4678 /********************************************************************************
4679 * Sysfs interface and ops enable/disable.
4682 #define SCX_ATTR(_name) \
4683 static struct kobj_attribute scx_attr_##_name = { \
4684 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4685 .show = scx_attr_##_name##_show, \
4688 static ssize_t scx_attr_state_show(struct kobject *kobj,
4689 struct kobj_attribute *ka, char *buf)
4691 return sysfs_emit(buf, "%s\n",
4692 scx_ops_enable_state_str[scx_ops_enable_state()]);
4696 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4697 struct kobj_attribute *ka, char *buf)
4699 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4701 SCX_ATTR(switch_all);
4703 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4704 struct kobj_attribute *ka, char *buf)
4706 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4708 SCX_ATTR(nr_rejected);
4710 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4711 struct kobj_attribute *ka, char *buf)
4713 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4715 SCX_ATTR(hotplug_seq);
4717 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4718 struct kobj_attribute *ka, char *buf)
4720 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4722 SCX_ATTR(enable_seq);
4724 static struct attribute *scx_global_attrs[] = {
4725 &scx_attr_state.attr,
4726 &scx_attr_switch_all.attr,
4727 &scx_attr_nr_rejected.attr,
4728 &scx_attr_hotplug_seq.attr,
4729 &scx_attr_enable_seq.attr,
4733 static const struct attribute_group scx_global_attr_group = {
4734 .attrs = scx_global_attrs,
4737 static void scx_kobj_release(struct kobject *kobj)
4742 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4743 struct kobj_attribute *ka, char *buf)
4745 return sysfs_emit(buf, "%s\n", scx_ops.name);
4749 static struct attribute *scx_sched_attrs[] = {
4753 ATTRIBUTE_GROUPS(scx_sched);
4755 static const struct kobj_type scx_ktype = {
4756 .release = scx_kobj_release,
4757 .sysfs_ops = &kobj_sysfs_ops,
4758 .default_groups = scx_sched_groups,
4761 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4763 return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4766 static const struct kset_uevent_ops scx_uevent_ops = {
4767 .uevent = scx_uevent,
4771 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4772 * sched_class. dl/rt are already handled.
4774 bool task_should_scx(int policy)
4776 if (!scx_enabled() ||
4777 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4779 if (READ_ONCE(scx_switching_all))
4781 return policy == SCHED_EXT;
4785 * scx_softlockup - sched_ext softlockup handler
4786 * @dur_s: number of seconds of CPU stuck due to soft lockup
4788 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4789 * live-lock the system by making many CPUs target the same DSQ to the point
4790 * where soft-lockup detection triggers. This function is called from
4791 * soft-lockup watchdog when the triggering point is close and tries to unjam
4792 * the system by enabling the breather and aborting the BPF scheduler.
4794 void scx_softlockup(u32 dur_s)
4796 switch (scx_ops_enable_state()) {
4797 case SCX_OPS_ENABLING:
4798 case SCX_OPS_ENABLED:
4804 /* allow only one instance, cleared at the end of scx_ops_bypass() */
4805 if (test_and_set_bit(0, &scx_in_softlockup))
4808 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4809 smp_processor_id(), dur_s, scx_ops.name);
4812 * Some CPUs may be trapped in the dispatch paths. Enable breather
4813 * immediately; otherwise, we might even be able to get to
4816 atomic_inc(&scx_ops_breather_depth);
4818 scx_ops_error("soft lockup - CPU#%d stuck for %us",
4819 smp_processor_id(), dur_s);
4822 static void scx_clear_softlockup(void)
4824 if (test_and_clear_bit(0, &scx_in_softlockup))
4825 atomic_dec(&scx_ops_breather_depth);
4829 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4830 * @bypass: true for bypass, false for unbypass
4832 * Bypassing guarantees that all runnable tasks make forward progress without
4833 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4834 * be held by tasks that the BPF scheduler is forgetting to run, which
4835 * unfortunately also excludes toggling the static branches.
4837 * Let's work around by overriding a couple ops and modifying behaviors based on
4838 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4839 * to force global FIFO scheduling.
4841 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4843 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4844 * %SCX_OPS_ENQ_LAST is also ignored.
4846 * - ops.dispatch() is ignored.
4848 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4849 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4850 * the tail of the queue with core_sched_at touched.
4852 * - pick_next_task() suppresses zero slice warning.
4854 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4857 * - scx_prio_less() reverts to the default core_sched_at order.
4859 static void scx_ops_bypass(bool bypass)
4861 static DEFINE_RAW_SPINLOCK(bypass_lock);
4863 unsigned long flags;
4865 raw_spin_lock_irqsave(&bypass_lock, flags);
4867 scx_ops_bypass_depth++;
4868 WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4869 if (scx_ops_bypass_depth != 1)
4872 scx_ops_bypass_depth--;
4873 WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4874 if (scx_ops_bypass_depth != 0)
4878 atomic_inc(&scx_ops_breather_depth);
4881 * No task property is changing. We just need to make sure all currently
4882 * queued tasks are re-queued according to the new scx_rq_bypassing()
4883 * state. As an optimization, walk each rq's runnable_list instead of
4884 * the scx_tasks list.
4886 * This function can't trust the scheduler and thus can't use
4887 * cpus_read_lock(). Walk all possible CPUs instead of online.
4889 for_each_possible_cpu(cpu) {
4890 struct rq *rq = cpu_rq(cpu);
4891 struct task_struct *p, *n;
4893 raw_spin_rq_lock(rq);
4896 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4897 rq->scx.flags |= SCX_RQ_BYPASSING;
4899 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4900 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4904 * We need to guarantee that no tasks are on the BPF scheduler
4905 * while bypassing. Either we see enabled or the enable path
4906 * sees scx_rq_bypassing() before moving tasks to SCX.
4908 if (!scx_enabled()) {
4909 raw_spin_rq_unlock(rq);
4914 * The use of list_for_each_entry_safe_reverse() is required
4915 * because each task is going to be removed from and added back
4916 * to the runnable_list during iteration. Because they're added
4917 * to the tail of the list, safe reverse iteration can still
4920 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4921 scx.runnable_node) {
4922 struct sched_enq_and_set_ctx ctx;
4924 /* cycling deq/enq is enough, see the function comment */
4925 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4926 sched_enq_and_set_task(&ctx);
4929 /* resched to restore ticks and idle state */
4930 if (cpu_online(cpu) || cpu == smp_processor_id())
4933 raw_spin_rq_unlock(rq);
4936 atomic_dec(&scx_ops_breather_depth);
4938 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4939 scx_clear_softlockup();
4942 static void free_exit_info(struct scx_exit_info *ei)
4950 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4952 struct scx_exit_info *ei;
4954 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4958 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4959 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4960 ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4962 if (!ei->bt || !ei->msg || !ei->dump) {
4970 static const char *scx_exit_reason(enum scx_exit_kind kind)
4973 case SCX_EXIT_UNREG:
4974 return "unregistered from user space";
4975 case SCX_EXIT_UNREG_BPF:
4976 return "unregistered from BPF";
4977 case SCX_EXIT_UNREG_KERN:
4978 return "unregistered from the main kernel";
4979 case SCX_EXIT_SYSRQ:
4980 return "disabled by sysrq-S";
4981 case SCX_EXIT_ERROR:
4982 return "runtime error";
4983 case SCX_EXIT_ERROR_BPF:
4984 return "scx_bpf_error";
4985 case SCX_EXIT_ERROR_STALL:
4986 return "runnable task stall";
4992 static void scx_ops_disable_workfn(struct kthread_work *work)
4994 struct scx_exit_info *ei = scx_exit_info;
4995 struct scx_task_iter sti;
4996 struct task_struct *p;
4997 struct rhashtable_iter rht_iter;
4998 struct scx_dispatch_q *dsq;
5001 kind = atomic_read(&scx_exit_kind);
5004 * NONE indicates that a new scx_ops has been registered since
5005 * disable was scheduled - don't kill the new ops. DONE
5006 * indicates that the ops has already been disabled.
5008 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
5010 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
5014 ei->reason = scx_exit_reason(ei->kind);
5016 /* guarantee forward progress by bypassing scx_ops */
5017 scx_ops_bypass(true);
5019 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
5020 case SCX_OPS_DISABLING:
5021 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
5023 case SCX_OPS_DISABLED:
5024 pr_warn("sched_ext: ops error detected without ops (%s)\n",
5025 scx_exit_info->msg);
5026 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5034 * Here, every runnable task is guaranteed to make forward progress and
5035 * we can safely use blocking synchronization constructs. Actually
5038 mutex_lock(&scx_ops_enable_mutex);
5040 static_branch_disable(&__scx_switched_all);
5041 WRITE_ONCE(scx_switching_all, false);
5044 * Shut down cgroup support before tasks so that the cgroup attach path
5045 * doesn't race against scx_ops_exit_task().
5049 scx_cgroup_unlock();
5052 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5053 * must be switched out and exited synchronously.
5055 percpu_down_write(&scx_fork_rwsem);
5057 scx_ops_init_task_enabled = false;
5059 scx_task_iter_start(&sti);
5060 while ((p = scx_task_iter_next_locked(&sti))) {
5061 const struct sched_class *old_class = p->sched_class;
5062 const struct sched_class *new_class =
5063 __setscheduler_class(p->policy, p->prio);
5064 struct sched_enq_and_set_ctx ctx;
5066 if (old_class != new_class && p->se.sched_delayed)
5067 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5069 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5071 p->sched_class = new_class;
5072 check_class_changing(task_rq(p), p, old_class);
5074 sched_enq_and_set_task(&ctx);
5076 check_class_changed(task_rq(p), p, old_class, p->prio);
5077 scx_ops_exit_task(p);
5079 scx_task_iter_stop(&sti);
5080 percpu_up_write(&scx_fork_rwsem);
5083 * Invalidate all the rq clocks to prevent getting outdated
5084 * rq clocks from a previous scx scheduler.
5086 for_each_possible_cpu(cpu) {
5087 struct rq *rq = cpu_rq(cpu);
5088 scx_rq_clock_invalidate(rq);
5091 /* no task is on scx, turn off all the switches and flush in-progress calls */
5092 static_branch_disable(&__scx_ops_enabled);
5093 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
5094 static_branch_disable(&scx_has_op[i]);
5095 static_branch_disable(&scx_ops_enq_last);
5096 static_branch_disable(&scx_ops_enq_exiting);
5097 static_branch_disable(&scx_ops_enq_migration_disabled);
5098 static_branch_disable(&scx_ops_cpu_preempt);
5099 static_branch_disable(&scx_builtin_idle_enabled);
5102 if (ei->kind >= SCX_EXIT_ERROR) {
5103 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5104 scx_ops.name, ei->reason);
5106 if (ei->msg[0] != '\0')
5107 pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
5108 #ifdef CONFIG_STACKTRACE
5109 stack_trace_print(ei->bt, ei->bt_len, 2);
5112 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5113 scx_ops.name, ei->reason);
5117 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
5119 cancel_delayed_work_sync(&scx_watchdog_work);
5122 * Delete the kobject from the hierarchy eagerly in addition to just
5123 * dropping a reference. Otherwise, if the object is deleted
5124 * asynchronously, sysfs could observe an object of the same name still
5125 * in the hierarchy when another scheduler is loaded.
5127 kobject_del(scx_root_kobj);
5128 kobject_put(scx_root_kobj);
5129 scx_root_kobj = NULL;
5131 memset(&scx_ops, 0, sizeof(scx_ops));
5133 rhashtable_walk_enter(&dsq_hash, &rht_iter);
5135 rhashtable_walk_start(&rht_iter);
5137 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
5138 destroy_dsq(dsq->id);
5140 rhashtable_walk_stop(&rht_iter);
5141 } while (dsq == ERR_PTR(-EAGAIN));
5142 rhashtable_walk_exit(&rht_iter);
5144 free_percpu(scx_dsp_ctx);
5146 scx_dsp_max_batch = 0;
5148 free_exit_info(scx_exit_info);
5149 scx_exit_info = NULL;
5151 mutex_unlock(&scx_ops_enable_mutex);
5153 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5156 scx_ops_bypass(false);
5159 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
5161 static void schedule_scx_ops_disable_work(void)
5163 struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
5166 * We may be called spuriously before the first bpf_sched_ext_reg(). If
5167 * scx_ops_helper isn't set up yet, there's nothing to do.
5170 kthread_queue_work(helper, &scx_ops_disable_work);
5173 static void scx_ops_disable(enum scx_exit_kind kind)
5175 int none = SCX_EXIT_NONE;
5177 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5178 kind = SCX_EXIT_ERROR;
5180 atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
5182 schedule_scx_ops_disable_work();
5185 static void dump_newline(struct seq_buf *s)
5187 trace_sched_ext_dump("");
5189 /* @s may be zero sized and seq_buf triggers WARN if so */
5191 seq_buf_putc(s, '\n');
5194 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5198 #ifdef CONFIG_TRACEPOINTS
5199 if (trace_sched_ext_dump_enabled()) {
5200 /* protected by scx_dump_state()::dump_lock */
5201 static char line_buf[SCX_EXIT_MSG_LEN];
5203 va_start(args, fmt);
5204 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5207 trace_sched_ext_dump(line_buf);
5210 /* @s may be zero sized and seq_buf triggers WARN if so */
5212 va_start(args, fmt);
5213 seq_buf_vprintf(s, fmt, args);
5216 seq_buf_putc(s, '\n');
5220 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5221 const unsigned long *bt, unsigned int len)
5225 for (i = 0; i < len; i++)
5226 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5229 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5231 struct scx_dump_data *dd = &scx_dump_data;
5233 lockdep_assert_irqs_disabled();
5235 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
5239 dd->prefix = prefix;
5242 static void ops_dump_flush(void)
5244 struct scx_dump_data *dd = &scx_dump_data;
5245 char *line = dd->buf.line;
5251 * There's something to flush and this is the first line. Insert a blank
5252 * line to distinguish ops dump.
5255 dump_newline(dd->s);
5260 * There may be multiple lines in $line. Scan and emit each line
5267 while (*end != '\n' && *end != '\0')
5271 * If $line overflowed, it may not have newline at the end.
5272 * Always emit with a newline.
5276 dump_line(dd->s, "%s%s", dd->prefix, line);
5280 /* move to the next line */
5290 static void ops_dump_exit(void)
5293 scx_dump_data.cpu = -1;
5296 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5297 struct task_struct *p, char marker)
5299 static unsigned long bt[SCX_EXIT_BT_LEN];
5300 char dsq_id_buf[19] = "(n/a)";
5301 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5302 unsigned int bt_len = 0;
5305 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5306 (unsigned long long)p->scx.dsq->id);
5309 dump_line(s, " %c%c %s[%d] %+ldms",
5310 marker, task_state_to_char(p), p->comm, p->pid,
5311 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5312 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5313 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5314 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5315 ops_state >> SCX_OPSS_QSEQ_SHIFT);
5316 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s",
5317 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
5318 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u",
5319 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
5320 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5322 if (SCX_HAS_OP(dump_task)) {
5323 ops_dump_init(s, " ");
5324 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5328 #ifdef CONFIG_STACKTRACE
5329 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5333 dump_stack_trace(s, " ", bt, bt_len);
5337 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5339 static DEFINE_SPINLOCK(dump_lock);
5340 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5341 struct scx_dump_ctx dctx = {
5343 .exit_code = ei->exit_code,
5344 .reason = ei->reason,
5345 .at_ns = ktime_get_ns(),
5346 .at_jiffies = jiffies,
5349 unsigned long flags;
5353 spin_lock_irqsave(&dump_lock, flags);
5355 seq_buf_init(&s, ei->dump, dump_len);
5357 if (ei->kind == SCX_EXIT_NONE) {
5358 dump_line(&s, "Debug dump triggered by %s", ei->reason);
5360 dump_line(&s, "%s[%d] triggered exit kind %d:",
5361 current->comm, current->pid, ei->kind);
5362 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
5364 dump_line(&s, "Backtrace:");
5365 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
5368 if (SCX_HAS_OP(dump)) {
5369 ops_dump_init(&s, "");
5370 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5375 dump_line(&s, "CPU states");
5376 dump_line(&s, "----------");
5378 for_each_possible_cpu(cpu) {
5379 struct rq *rq = cpu_rq(cpu);
5381 struct task_struct *p;
5388 idle = list_empty(&rq->scx.runnable_list) &&
5389 rq->curr->sched_class == &idle_sched_class;
5391 if (idle && !SCX_HAS_OP(dump_cpu))
5395 * We don't yet know whether ops.dump_cpu() will produce output
5396 * and we may want to skip the default CPU dump if it doesn't.
5397 * Use a nested seq_buf to generate the standard dump so that we
5398 * can decide whether to commit later.
5400 avail = seq_buf_get_buf(&s, &buf);
5401 seq_buf_init(&ns, buf, avail);
5404 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5405 cpu, rq->scx.nr_running, rq->scx.flags,
5406 rq->scx.cpu_released, rq->scx.ops_qseq,
5408 dump_line(&ns, " curr=%s[%d] class=%ps",
5409 rq->curr->comm, rq->curr->pid,
5410 rq->curr->sched_class);
5411 if (!cpumask_empty(rq->scx.cpus_to_kick))
5412 dump_line(&ns, " cpus_to_kick : %*pb",
5413 cpumask_pr_args(rq->scx.cpus_to_kick));
5414 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5415 dump_line(&ns, " idle_to_kick : %*pb",
5416 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5417 if (!cpumask_empty(rq->scx.cpus_to_preempt))
5418 dump_line(&ns, " cpus_to_preempt: %*pb",
5419 cpumask_pr_args(rq->scx.cpus_to_preempt));
5420 if (!cpumask_empty(rq->scx.cpus_to_wait))
5421 dump_line(&ns, " cpus_to_wait : %*pb",
5422 cpumask_pr_args(rq->scx.cpus_to_wait));
5424 used = seq_buf_used(&ns);
5425 if (SCX_HAS_OP(dump_cpu)) {
5426 ops_dump_init(&ns, " ");
5427 SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5432 * If idle && nothing generated by ops.dump_cpu(), there's
5433 * nothing interesting. Skip.
5435 if (idle && used == seq_buf_used(&ns))
5439 * $s may already have overflowed when $ns was created. If so,
5440 * calling commit on it will trigger BUG.
5443 seq_buf_commit(&s, seq_buf_used(&ns));
5444 if (seq_buf_has_overflowed(&ns))
5445 seq_buf_set_overflow(&s);
5448 if (rq->curr->sched_class == &ext_sched_class)
5449 scx_dump_task(&s, &dctx, rq->curr, '*');
5451 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5452 scx_dump_task(&s, &dctx, p, ' ');
5457 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5458 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5459 trunc_marker, sizeof(trunc_marker));
5461 spin_unlock_irqrestore(&dump_lock, flags);
5464 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5466 struct scx_exit_info *ei = scx_exit_info;
5468 if (ei->kind >= SCX_EXIT_ERROR)
5469 scx_dump_state(ei, scx_ops.exit_dump_len);
5471 schedule_scx_ops_disable_work();
5474 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5476 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5478 const char *fmt, ...)
5480 struct scx_exit_info *ei = scx_exit_info;
5481 int none = SCX_EXIT_NONE;
5484 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5487 ei->exit_code = exit_code;
5488 #ifdef CONFIG_STACKTRACE
5489 if (kind >= SCX_EXIT_ERROR)
5490 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5492 va_start(args, fmt);
5493 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5497 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5498 * in scx_ops_disable_workfn().
5501 ei->reason = scx_exit_reason(ei->kind);
5503 irq_work_queue(&scx_ops_error_irq_work);
5506 static struct kthread_worker *scx_create_rt_helper(const char *name)
5508 struct kthread_worker *helper;
5510 helper = kthread_run_worker(0, name);
5512 sched_set_fifo(helper->task);
5516 static void check_hotplug_seq(const struct sched_ext_ops *ops)
5518 unsigned long long global_hotplug_seq;
5521 * If a hotplug event has occurred between when a scheduler was
5522 * initialized, and when we were able to attach, exit and notify user
5525 if (ops->hotplug_seq) {
5526 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5527 if (ops->hotplug_seq != global_hotplug_seq) {
5528 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5529 "expected hotplug seq %llu did not match actual %llu",
5530 ops->hotplug_seq, global_hotplug_seq);
5535 static int validate_ops(const struct sched_ext_ops *ops)
5538 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5539 * ops.enqueue() callback isn't implemented.
5541 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5542 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5549 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5551 struct scx_task_iter sti;
5552 struct task_struct *p;
5553 unsigned long timeout;
5554 int i, cpu, node, ret;
5556 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5557 cpu_possible_mask)) {
5558 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5562 mutex_lock(&scx_ops_enable_mutex);
5564 if (!scx_ops_helper) {
5565 WRITE_ONCE(scx_ops_helper,
5566 scx_create_rt_helper("sched_ext_ops_helper"));
5567 if (!scx_ops_helper) {
5574 struct scx_dispatch_q **dsqs;
5576 dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5582 for_each_node_state(node, N_POSSIBLE) {
5583 struct scx_dispatch_q *dsq;
5585 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5587 for_each_node_state(node, N_POSSIBLE)
5594 init_dsq(dsq, SCX_DSQ_GLOBAL);
5601 if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5606 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5607 if (!scx_root_kobj) {
5612 scx_root_kobj->kset = scx_kset;
5613 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5617 scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5618 if (!scx_exit_info) {
5624 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5625 * disable path. Failure triggers full disabling from here on.
5629 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5632 atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5633 scx_warned_zero_slice = false;
5635 atomic_long_set(&scx_nr_rejected, 0);
5637 for_each_possible_cpu(cpu)
5638 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5641 * Keep CPUs stable during enable so that the BPF scheduler can track
5642 * online CPUs by watching ->on/offline_cpu() after ->init().
5647 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5649 ret = ops_sanitize_err("init", ret);
5651 scx_ops_error("ops.init() failed (%d)", ret);
5656 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5657 if (((void (**)(void))ops)[i])
5658 static_branch_enable_cpuslocked(&scx_has_op[i]);
5660 check_hotplug_seq(ops);
5662 update_selcpu_topology();
5666 ret = validate_ops(ops);
5670 WARN_ON_ONCE(scx_dsp_ctx);
5671 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5672 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5674 __alignof__(struct scx_dsp_ctx));
5680 if (ops->timeout_ms)
5681 timeout = msecs_to_jiffies(ops->timeout_ms);
5683 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5685 WRITE_ONCE(scx_watchdog_timeout, timeout);
5686 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5687 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5688 scx_watchdog_timeout / 2);
5691 * Once __scx_ops_enabled is set, %current can be switched to SCX
5692 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5693 * userspace scheduling) may not function correctly before all tasks are
5694 * switched. Init in bypass mode to guarantee forward progress.
5696 scx_ops_bypass(true);
5698 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5699 if (((void (**)(void))ops)[i])
5700 static_branch_enable(&scx_has_op[i]);
5702 if (ops->flags & SCX_OPS_ENQ_LAST)
5703 static_branch_enable(&scx_ops_enq_last);
5705 if (ops->flags & SCX_OPS_ENQ_EXITING)
5706 static_branch_enable(&scx_ops_enq_exiting);
5707 if (ops->flags & SCX_OPS_ENQ_MIGRATION_DISABLED)
5708 static_branch_enable(&scx_ops_enq_migration_disabled);
5709 if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5710 static_branch_enable(&scx_ops_cpu_preempt);
5712 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5714 static_branch_enable(&scx_builtin_idle_enabled);
5716 static_branch_disable(&scx_builtin_idle_enabled);
5720 * Lock out forks, cgroup on/offlining and moves before opening the
5721 * floodgate so that they don't wander into the operations prematurely.
5723 percpu_down_write(&scx_fork_rwsem);
5725 WARN_ON_ONCE(scx_ops_init_task_enabled);
5726 scx_ops_init_task_enabled = true;
5729 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5730 * preventing new tasks from being added. No need to exclude tasks
5731 * leaving as sched_ext_free() can handle both prepped and enabled
5732 * tasks. Prep all tasks first and then enable them with preemption
5735 * All cgroups should be initialized before scx_ops_init_task() so that
5736 * the BPF scheduler can reliably track each task's cgroup membership
5737 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5738 * migrations while tasks are being initialized so that
5739 * scx_cgroup_can_attach() never sees uninitialized tasks.
5742 ret = scx_cgroup_init();
5744 goto err_disable_unlock_all;
5746 scx_task_iter_start(&sti);
5747 while ((p = scx_task_iter_next_locked(&sti))) {
5749 * @p may already be dead, have lost all its usages counts and
5750 * be waiting for RCU grace period before being freed. @p can't
5751 * be initialized for SCX in such cases and should be ignored.
5753 if (!tryget_task_struct(p))
5756 scx_task_iter_unlock(&sti);
5758 ret = scx_ops_init_task(p, task_group(p), false);
5761 scx_task_iter_relock(&sti);
5762 scx_task_iter_stop(&sti);
5763 scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5764 ret, p->comm, p->pid);
5765 goto err_disable_unlock_all;
5768 scx_set_task_state(p, SCX_TASK_READY);
5771 scx_task_iter_relock(&sti);
5773 scx_task_iter_stop(&sti);
5774 scx_cgroup_unlock();
5775 percpu_up_write(&scx_fork_rwsem);
5778 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5779 * all eligible tasks.
5781 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5782 static_branch_enable(&__scx_ops_enabled);
5785 * We're fully committed and can't fail. The task READY -> ENABLED
5786 * transitions here are synchronized against sched_ext_free() through
5789 percpu_down_write(&scx_fork_rwsem);
5790 scx_task_iter_start(&sti);
5791 while ((p = scx_task_iter_next_locked(&sti))) {
5792 const struct sched_class *old_class = p->sched_class;
5793 const struct sched_class *new_class =
5794 __setscheduler_class(p->policy, p->prio);
5795 struct sched_enq_and_set_ctx ctx;
5797 if (old_class != new_class && p->se.sched_delayed)
5798 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5800 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5802 p->scx.slice = SCX_SLICE_DFL;
5803 p->sched_class = new_class;
5804 check_class_changing(task_rq(p), p, old_class);
5806 sched_enq_and_set_task(&ctx);
5808 check_class_changed(task_rq(p), p, old_class, p->prio);
5810 scx_task_iter_stop(&sti);
5811 percpu_up_write(&scx_fork_rwsem);
5813 scx_ops_bypass(false);
5815 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5816 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5820 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5821 static_branch_enable(&__scx_switched_all);
5823 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5824 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5825 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5826 mutex_unlock(&scx_ops_enable_mutex);
5828 atomic_long_inc(&scx_enable_seq);
5833 kobject_del(scx_root_kobj);
5835 kobject_put(scx_root_kobj);
5836 scx_root_kobj = NULL;
5837 if (scx_exit_info) {
5838 free_exit_info(scx_exit_info);
5839 scx_exit_info = NULL;
5842 mutex_unlock(&scx_ops_enable_mutex);
5845 err_disable_unlock_all:
5846 scx_cgroup_unlock();
5847 percpu_up_write(&scx_fork_rwsem);
5848 scx_ops_bypass(false);
5850 mutex_unlock(&scx_ops_enable_mutex);
5852 * Returning an error code here would not pass all the error information
5853 * to userspace. Record errno using scx_ops_error() for cases
5854 * scx_ops_error() wasn't already invoked and exit indicating success so
5855 * that the error is notified through ops.exit() with all the details.
5857 * Flush scx_ops_disable_work to ensure that error is reported before
5860 scx_ops_error("scx_ops_enable() failed (%d)", ret);
5861 kthread_flush_work(&scx_ops_disable_work);
5866 /********************************************************************************
5867 * bpf_struct_ops plumbing.
5869 #include <linux/bpf_verifier.h>
5870 #include <linux/bpf.h>
5871 #include <linux/btf.h>
5873 static const struct btf_type *task_struct_type;
5875 static bool bpf_scx_is_valid_access(int off, int size,
5876 enum bpf_access_type type,
5877 const struct bpf_prog *prog,
5878 struct bpf_insn_access_aux *info)
5880 if (type != BPF_READ)
5882 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5884 if (off % size != 0)
5887 return btf_ctx_access(off, size, type, prog, info);
5890 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5891 const struct bpf_reg_state *reg, int off,
5894 const struct btf_type *t;
5896 t = btf_type_by_id(reg->btf, reg->btf_id);
5897 if (t == task_struct_type) {
5898 if (off >= offsetof(struct task_struct, scx.slice) &&
5899 off + size <= offsetofend(struct task_struct, scx.slice))
5900 return SCALAR_VALUE;
5901 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5902 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5903 return SCALAR_VALUE;
5904 if (off >= offsetof(struct task_struct, scx.disallow) &&
5905 off + size <= offsetofend(struct task_struct, scx.disallow))
5906 return SCALAR_VALUE;
5912 static const struct bpf_func_proto *
5913 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5916 case BPF_FUNC_task_storage_get:
5917 return &bpf_task_storage_get_proto;
5918 case BPF_FUNC_task_storage_delete:
5919 return &bpf_task_storage_delete_proto;
5921 return bpf_base_func_proto(func_id, prog);
5925 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5926 .get_func_proto = bpf_scx_get_func_proto,
5927 .is_valid_access = bpf_scx_is_valid_access,
5928 .btf_struct_access = bpf_scx_btf_struct_access,
5931 static int bpf_scx_init_member(const struct btf_type *t,
5932 const struct btf_member *member,
5933 void *kdata, const void *udata)
5935 const struct sched_ext_ops *uops = udata;
5936 struct sched_ext_ops *ops = kdata;
5937 u32 moff = __btf_member_bit_offset(t, member) / 8;
5941 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5942 if (*(u32 *)(udata + moff) > INT_MAX)
5944 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5946 case offsetof(struct sched_ext_ops, flags):
5947 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5949 ops->flags = *(u64 *)(udata + moff);
5951 case offsetof(struct sched_ext_ops, name):
5952 ret = bpf_obj_name_cpy(ops->name, uops->name,
5959 case offsetof(struct sched_ext_ops, timeout_ms):
5960 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5961 SCX_WATCHDOG_MAX_TIMEOUT)
5963 ops->timeout_ms = *(u32 *)(udata + moff);
5965 case offsetof(struct sched_ext_ops, exit_dump_len):
5966 ops->exit_dump_len =
5967 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5969 case offsetof(struct sched_ext_ops, hotplug_seq):
5970 ops->hotplug_seq = *(u64 *)(udata + moff);
5977 static int bpf_scx_check_member(const struct btf_type *t,
5978 const struct btf_member *member,
5979 const struct bpf_prog *prog)
5981 u32 moff = __btf_member_bit_offset(t, member) / 8;
5984 case offsetof(struct sched_ext_ops, init_task):
5985 #ifdef CONFIG_EXT_GROUP_SCHED
5986 case offsetof(struct sched_ext_ops, cgroup_init):
5987 case offsetof(struct sched_ext_ops, cgroup_exit):
5988 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5990 case offsetof(struct sched_ext_ops, cpu_online):
5991 case offsetof(struct sched_ext_ops, cpu_offline):
5992 case offsetof(struct sched_ext_ops, init):
5993 case offsetof(struct sched_ext_ops, exit):
5996 if (prog->sleepable)
6003 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
6005 return scx_ops_enable(kdata, link);
6008 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
6010 scx_ops_disable(SCX_EXIT_UNREG);
6011 kthread_flush_work(&scx_ops_disable_work);
6014 static int bpf_scx_init(struct btf *btf)
6016 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
6021 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
6024 * sched_ext does not support updating the actively-loaded BPF
6025 * scheduler, as registering a BPF scheduler can always fail if the
6026 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
6027 * etc. Similarly, we can always race with unregistration happening
6028 * elsewhere, such as with sysrq.
6033 static int bpf_scx_validate(void *kdata)
6038 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
6039 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
6040 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
6041 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
6042 static void sched_ext_ops__tick(struct task_struct *p) {}
6043 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
6044 static void sched_ext_ops__running(struct task_struct *p) {}
6045 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
6046 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
6047 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
6048 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
6049 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
6050 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
6051 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
6052 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
6053 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
6054 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
6055 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
6056 static void sched_ext_ops__enable(struct task_struct *p) {}
6057 static void sched_ext_ops__disable(struct task_struct *p) {}
6058 #ifdef CONFIG_EXT_GROUP_SCHED
6059 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
6060 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
6061 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
6062 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6063 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6064 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
6066 static void sched_ext_ops__cpu_online(s32 cpu) {}
6067 static void sched_ext_ops__cpu_offline(s32 cpu) {}
6068 static s32 sched_ext_ops__init(void) { return -EINVAL; }
6069 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
6070 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
6071 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
6072 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
6074 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
6075 .select_cpu = sched_ext_ops__select_cpu,
6076 .enqueue = sched_ext_ops__enqueue,
6077 .dequeue = sched_ext_ops__dequeue,
6078 .dispatch = sched_ext_ops__dispatch,
6079 .tick = sched_ext_ops__tick,
6080 .runnable = sched_ext_ops__runnable,
6081 .running = sched_ext_ops__running,
6082 .stopping = sched_ext_ops__stopping,
6083 .quiescent = sched_ext_ops__quiescent,
6084 .yield = sched_ext_ops__yield,
6085 .core_sched_before = sched_ext_ops__core_sched_before,
6086 .set_weight = sched_ext_ops__set_weight,
6087 .set_cpumask = sched_ext_ops__set_cpumask,
6088 .update_idle = sched_ext_ops__update_idle,
6089 .cpu_acquire = sched_ext_ops__cpu_acquire,
6090 .cpu_release = sched_ext_ops__cpu_release,
6091 .init_task = sched_ext_ops__init_task,
6092 .exit_task = sched_ext_ops__exit_task,
6093 .enable = sched_ext_ops__enable,
6094 .disable = sched_ext_ops__disable,
6095 #ifdef CONFIG_EXT_GROUP_SCHED
6096 .cgroup_init = sched_ext_ops__cgroup_init,
6097 .cgroup_exit = sched_ext_ops__cgroup_exit,
6098 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
6099 .cgroup_move = sched_ext_ops__cgroup_move,
6100 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
6101 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
6103 .cpu_online = sched_ext_ops__cpu_online,
6104 .cpu_offline = sched_ext_ops__cpu_offline,
6105 .init = sched_ext_ops__init,
6106 .exit = sched_ext_ops__exit,
6107 .dump = sched_ext_ops__dump,
6108 .dump_cpu = sched_ext_ops__dump_cpu,
6109 .dump_task = sched_ext_ops__dump_task,
6112 static struct bpf_struct_ops bpf_sched_ext_ops = {
6113 .verifier_ops = &bpf_scx_verifier_ops,
6115 .unreg = bpf_scx_unreg,
6116 .check_member = bpf_scx_check_member,
6117 .init_member = bpf_scx_init_member,
6118 .init = bpf_scx_init,
6119 .update = bpf_scx_update,
6120 .validate = bpf_scx_validate,
6121 .name = "sched_ext_ops",
6122 .owner = THIS_MODULE,
6123 .cfi_stubs = &__bpf_ops_sched_ext_ops
6127 /********************************************************************************
6128 * System integration and init.
6131 static void sysrq_handle_sched_ext_reset(u8 key)
6134 scx_ops_disable(SCX_EXIT_SYSRQ);
6136 pr_info("sched_ext: BPF scheduler not yet used\n");
6139 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6140 .handler = sysrq_handle_sched_ext_reset,
6141 .help_msg = "reset-sched-ext(S)",
6142 .action_msg = "Disable sched_ext and revert all tasks to CFS",
6143 .enable_mask = SYSRQ_ENABLE_RTNICE,
6146 static void sysrq_handle_sched_ext_dump(u8 key)
6148 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6151 scx_dump_state(&ei, 0);
6154 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6155 .handler = sysrq_handle_sched_ext_dump,
6156 .help_msg = "dump-sched-ext(D)",
6157 .action_msg = "Trigger sched_ext debug dump",
6158 .enable_mask = SYSRQ_ENABLE_RTNICE,
6161 static bool can_skip_idle_kick(struct rq *rq)
6163 lockdep_assert_rq_held(rq);
6166 * We can skip idle kicking if @rq is going to go through at least one
6167 * full SCX scheduling cycle before going idle. Just checking whether
6168 * curr is not idle is insufficient because we could be racing
6169 * balance_one() trying to pull the next task from a remote rq, which
6170 * may fail, and @rq may become idle afterwards.
6172 * The race window is small and we don't and can't guarantee that @rq is
6173 * only kicked while idle anyway. Skip only when sure.
6175 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6178 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6180 struct rq *rq = cpu_rq(cpu);
6181 struct scx_rq *this_scx = &this_rq->scx;
6182 bool should_wait = false;
6183 unsigned long flags;
6185 raw_spin_rq_lock_irqsave(rq, flags);
6188 * During CPU hotplug, a CPU may depend on kicking itself to make
6189 * forward progress. Allow kicking self regardless of online state.
6191 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6192 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6193 if (rq->curr->sched_class == &ext_sched_class)
6194 rq->curr->scx.slice = 0;
6195 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6198 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6199 pseqs[cpu] = rq->scx.pnt_seq;
6205 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6206 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6209 raw_spin_rq_unlock_irqrestore(rq, flags);
6214 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6216 struct rq *rq = cpu_rq(cpu);
6217 unsigned long flags;
6219 raw_spin_rq_lock_irqsave(rq, flags);
6221 if (!can_skip_idle_kick(rq) &&
6222 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6225 raw_spin_rq_unlock_irqrestore(rq, flags);
6228 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6230 struct rq *this_rq = this_rq();
6231 struct scx_rq *this_scx = &this_rq->scx;
6232 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6233 bool should_wait = false;
6236 for_each_cpu(cpu, this_scx->cpus_to_kick) {
6237 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6238 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6239 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6242 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6243 kick_one_cpu_if_idle(cpu, this_rq);
6244 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6250 for_each_cpu(cpu, this_scx->cpus_to_wait) {
6251 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6253 if (cpu != cpu_of(this_rq)) {
6255 * Pairs with smp_store_release() issued by this CPU in
6256 * switch_class() on the resched path.
6258 * We busy-wait here to guarantee that no other task can
6259 * be scheduled on our core before the target CPU has
6260 * entered the resched path.
6262 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6266 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6271 * print_scx_info - print out sched_ext scheduler state
6272 * @log_lvl: the log level to use when printing
6275 * If a sched_ext scheduler is enabled, print the name and state of the
6276 * scheduler. If @p is on sched_ext, print further information about the task.
6278 * This function can be safely called on any task as long as the task_struct
6279 * itself is accessible. While safe, this function isn't synchronized and may
6280 * print out mixups or garbages of limited length.
6282 void print_scx_info(const char *log_lvl, struct task_struct *p)
6284 enum scx_ops_enable_state state = scx_ops_enable_state();
6285 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6286 char runnable_at_buf[22] = "?";
6287 struct sched_class *class;
6288 unsigned long runnable_at;
6290 if (state == SCX_OPS_DISABLED)
6294 * Carefully check if the task was running on sched_ext, and then
6295 * carefully copy the time it's been runnable, and its state.
6297 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6298 class != &ext_sched_class) {
6299 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6300 scx_ops_enable_state_str[state], all);
6304 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6305 sizeof(runnable_at)))
6306 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6307 jiffies_delta_msecs(runnable_at, jiffies));
6309 /* print everything onto one line to conserve console space */
6310 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6311 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6315 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6318 * SCX schedulers often have userspace components which are sometimes
6319 * involved in critial scheduling paths. PM operations involve freezing
6320 * userspace which can lead to scheduling misbehaviors including stalls.
6321 * Let's bypass while PM operations are in progress.
6324 case PM_HIBERNATION_PREPARE:
6325 case PM_SUSPEND_PREPARE:
6326 case PM_RESTORE_PREPARE:
6327 scx_ops_bypass(true);
6329 case PM_POST_HIBERNATION:
6330 case PM_POST_SUSPEND:
6331 case PM_POST_RESTORE:
6332 scx_ops_bypass(false);
6339 static struct notifier_block scx_pm_notifier = {
6340 .notifier_call = scx_pm_handler,
6343 void __init init_sched_ext_class(void)
6348 * The following is to prevent the compiler from optimizing out the enum
6349 * definitions so that BPF scheduler implementations can use them
6350 * through the generated vmlinux.h.
6352 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6355 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6357 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
6358 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
6360 scx_kick_cpus_pnt_seqs =
6361 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6362 __alignof__(scx_kick_cpus_pnt_seqs[0]));
6363 BUG_ON(!scx_kick_cpus_pnt_seqs);
6365 for_each_possible_cpu(cpu) {
6366 struct rq *rq = cpu_rq(cpu);
6368 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6369 INIT_LIST_HEAD(&rq->scx.runnable_list);
6370 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6372 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
6373 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
6374 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
6375 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
6376 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6377 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6379 if (cpu_online(cpu))
6380 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6383 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6384 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6385 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6389 /********************************************************************************
6390 * Helpers that can be called from the BPF scheduler.
6392 #include <linux/btf_ids.h>
6394 __bpf_kfunc_start_defs();
6396 static bool check_builtin_idle_enabled(void)
6398 if (static_branch_likely(&scx_builtin_idle_enabled))
6401 scx_ops_error("built-in idle tracking is disabled");
6406 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
6407 * @p: task_struct to select a CPU for
6408 * @prev_cpu: CPU @p was on previously
6409 * @wake_flags: %SCX_WAKE_* flags
6410 * @is_idle: out parameter indicating whether the returned CPU is idle
6412 * Can only be called from ops.select_cpu() if the built-in CPU selection is
6413 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
6414 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
6416 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
6417 * currently idle and thus a good candidate for direct dispatching.
6419 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
6420 u64 wake_flags, bool *is_idle)
6422 if (!check_builtin_idle_enabled())
6425 if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
6429 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
6437 __bpf_kfunc_end_defs();
6439 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
6440 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
6441 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
6443 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
6444 .owner = THIS_MODULE,
6445 .set = &scx_kfunc_ids_select_cpu,
6448 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6450 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6453 lockdep_assert_irqs_disabled();
6456 scx_ops_error("called with NULL task");
6460 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6461 scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6468 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6471 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6472 struct task_struct *ddsp_task;
6474 ddsp_task = __this_cpu_read(direct_dispatch_task);
6476 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6480 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6481 scx_ops_error("dispatch buffer overflow");
6485 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6487 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6489 .enq_flags = enq_flags,
6493 __bpf_kfunc_start_defs();
6496 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6497 * @p: task_struct to insert
6498 * @dsq_id: DSQ to insert into
6499 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6500 * @enq_flags: SCX_ENQ_*
6502 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6503 * call this function spuriously. Can be called from ops.enqueue(),
6504 * ops.select_cpu(), and ops.dispatch().
6506 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6507 * and @p must match the task being enqueued.
6509 * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
6510 * will be directly inserted into the corresponding dispatch queue after
6511 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6512 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6513 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6516 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6517 * and this function can be called upto ops.dispatch_max_batch times to insert
6518 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
6519 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6521 * This function doesn't have any locking restrictions and may be called under
6522 * BPF locks (in the future when BPF introduces more flexible locking).
6524 * @p is allowed to run for @slice. The scheduling path is triggered on slice
6525 * exhaustion. If zero, the current residual slice is maintained. If
6526 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6527 * scx_bpf_kick_cpu() to trigger scheduling.
6529 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6532 if (!scx_dsq_insert_preamble(p, enq_flags))
6536 p->scx.slice = slice;
6538 p->scx.slice = p->scx.slice ?: 1;
6540 scx_dsq_insert_commit(p, dsq_id, enq_flags);
6543 /* for backward compatibility, will be removed in v6.15 */
6544 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6547 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6548 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6552 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6553 * @p: task_struct to insert
6554 * @dsq_id: DSQ to insert into
6555 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6556 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6557 * @enq_flags: SCX_ENQ_*
6559 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6560 * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6561 * are identical to scx_bpf_dsq_insert().
6563 * @vtime ordering is according to time_before64() which considers wrapping. A
6564 * numerically larger vtime may indicate an earlier position in the ordering and
6567 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6568 * function must not be called on a DSQ which already has one or more FIFO tasks
6569 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6570 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6572 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6573 u64 slice, u64 vtime, u64 enq_flags)
6575 if (!scx_dsq_insert_preamble(p, enq_flags))
6579 p->scx.slice = slice;
6581 p->scx.slice = p->scx.slice ?: 1;
6583 p->scx.dsq_vtime = vtime;
6585 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6588 /* for backward compatibility, will be removed in v6.15 */
6589 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6590 u64 slice, u64 vtime, u64 enq_flags)
6592 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6593 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6596 __bpf_kfunc_end_defs();
6598 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6599 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6600 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6601 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6602 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6603 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6605 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6606 .owner = THIS_MODULE,
6607 .set = &scx_kfunc_ids_enqueue_dispatch,
6610 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6611 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6613 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6614 struct rq *this_rq, *src_rq, *locked_rq;
6615 bool dispatched = false;
6617 unsigned long flags;
6619 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6623 * Can be called from either ops.dispatch() locking this_rq() or any
6624 * context where no rq lock is held. If latter, lock @p's task_rq which
6625 * we'll likely need anyway.
6627 src_rq = task_rq(p);
6629 local_irq_save(flags);
6630 this_rq = this_rq();
6631 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6634 if (this_rq != src_rq) {
6635 raw_spin_rq_unlock(this_rq);
6636 raw_spin_rq_lock(src_rq);
6639 raw_spin_rq_lock(src_rq);
6643 * If the BPF scheduler keeps calling this function repeatedly, it can
6644 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6645 * breather if necessary.
6647 scx_ops_breather(src_rq);
6650 raw_spin_lock(&src_dsq->lock);
6653 * Did someone else get to it? @p could have already left $src_dsq, got
6654 * re-enqueud, or be in the process of being consumed by someone else.
6656 if (unlikely(p->scx.dsq != src_dsq ||
6657 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6658 p->scx.holding_cpu >= 0) ||
6659 WARN_ON_ONCE(src_rq != task_rq(p))) {
6660 raw_spin_unlock(&src_dsq->lock);
6664 /* @p is still on $src_dsq and stable, determine the destination */
6665 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6668 * Apply vtime and slice updates before moving so that the new time is
6669 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6670 * this is safe as we're locking it.
6672 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6673 p->scx.dsq_vtime = kit->vtime;
6674 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6675 p->scx.slice = kit->slice;
6678 locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6682 if (this_rq != locked_rq) {
6683 raw_spin_rq_unlock(locked_rq);
6684 raw_spin_rq_lock(this_rq);
6687 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6690 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6691 __SCX_DSQ_ITER_HAS_VTIME);
6695 __bpf_kfunc_start_defs();
6698 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6700 * Can only be called from ops.dispatch().
6702 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6704 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6707 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6711 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6713 * Cancel the latest dispatch. Can be called multiple times to cancel further
6714 * dispatches. Can only be called from ops.dispatch().
6716 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6718 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6720 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6723 if (dspc->cursor > 0)
6726 scx_ops_error("dispatch buffer underflow");
6730 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6731 * @dsq_id: DSQ to move task from
6733 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6734 * local DSQ for execution. Can only be called from ops.dispatch().
6736 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6737 * before trying to move from the specified DSQ. It may also grab rq locks and
6738 * thus can't be called under any BPF locks.
6740 * Returns %true if a task has been moved, %false if there isn't any task to
6743 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6745 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6746 struct scx_dispatch_q *dsq;
6748 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6751 flush_dispatch_buf(dspc->rq);
6753 dsq = find_user_dsq(dsq_id);
6754 if (unlikely(!dsq)) {
6755 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6759 if (consume_dispatch_q(dspc->rq, dsq)) {
6761 * A successfully consumed task can be dequeued before it starts
6762 * running while the CPU is trying to migrate other dispatched
6763 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6773 /* for backward compatibility, will be removed in v6.15 */
6774 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6776 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6777 return scx_bpf_dsq_move_to_local(dsq_id);
6781 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6782 * @it__iter: DSQ iterator in progress
6783 * @slice: duration the moved task can run for in nsecs
6785 * Override the slice of the next task that will be moved from @it__iter using
6786 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6787 * slice duration is kept.
6789 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6792 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6795 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6798 /* for backward compatibility, will be removed in v6.15 */
6799 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6800 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6802 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6803 scx_bpf_dsq_move_set_slice(it__iter, slice);
6807 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6808 * @it__iter: DSQ iterator in progress
6809 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6811 * Override the vtime of the next task that will be moved from @it__iter using
6812 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
6813 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6814 * override is ignored and cleared.
6816 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6819 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6822 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6825 /* for backward compatibility, will be removed in v6.15 */
6826 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6827 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6829 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6830 scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6834 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6835 * @it__iter: DSQ iterator in progress
6836 * @p: task to transfer
6837 * @dsq_id: DSQ to move @p to
6838 * @enq_flags: SCX_ENQ_*
6840 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6841 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6842 * be the destination.
6844 * For the transfer to be successful, @p must still be on the DSQ and have been
6845 * queued before the DSQ iteration started. This function doesn't care whether
6846 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6847 * been queued before the iteration started.
6849 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6851 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6852 * lock (e.g. BPF timers or SYSCALL programs).
6854 * Returns %true if @p has been consumed, %false if @p had already been consumed
6857 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6858 struct task_struct *p, u64 dsq_id,
6861 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6862 p, dsq_id, enq_flags);
6865 /* for backward compatibility, will be removed in v6.15 */
6866 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6867 struct task_struct *p, u64 dsq_id,
6870 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6871 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6875 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6876 * @it__iter: DSQ iterator in progress
6877 * @p: task to transfer
6878 * @dsq_id: DSQ to move @p to
6879 * @enq_flags: SCX_ENQ_*
6881 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6882 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6883 * user DSQ as only user DSQs support priority queue.
6885 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6886 * and scx_bpf_dsq_move_set_vtime() to update.
6888 * All other aspects are identical to scx_bpf_dsq_move(). See
6889 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6891 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6892 struct task_struct *p, u64 dsq_id,
6895 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6896 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6899 /* for backward compatibility, will be removed in v6.15 */
6900 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6901 struct task_struct *p, u64 dsq_id,
6904 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_vtime() renamed to scx_bpf_dsq_move_vtime()");
6905 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6908 __bpf_kfunc_end_defs();
6910 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6911 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6912 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6913 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6914 BTF_ID_FLAGS(func, scx_bpf_consume)
6915 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6916 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6917 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6918 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6919 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6920 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6921 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6922 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6923 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6925 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6926 .owner = THIS_MODULE,
6927 .set = &scx_kfunc_ids_dispatch,
6930 __bpf_kfunc_start_defs();
6933 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6935 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6936 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6937 * processed tasks. Can only be called from ops.cpu_release().
6939 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6942 u32 nr_enqueued = 0;
6944 struct task_struct *p, *n;
6946 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6949 rq = cpu_rq(smp_processor_id());
6950 lockdep_assert_rq_held(rq);
6953 * The BPF scheduler may choose to dispatch tasks back to
6954 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6955 * first to avoid processing the same tasks repeatedly.
6957 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6958 scx.dsq_list.node) {
6960 * If @p is being migrated, @p's current CPU may not agree with
6961 * its allowed CPUs and the migration_cpu_stop is about to
6962 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6964 * While racing sched property changes may also dequeue and
6965 * re-enqueue a migrating task while its current CPU and allowed
6966 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6967 * the current local DSQ for running tasks and thus are not
6968 * visible to the BPF scheduler.
6970 if (p->migration_pending)
6973 dispatch_dequeue(rq, p);
6974 list_add_tail(&p->scx.dsq_list.node, &tasks);
6977 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6978 list_del_init(&p->scx.dsq_list.node);
6979 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6986 __bpf_kfunc_end_defs();
6988 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6989 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6990 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6992 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6993 .owner = THIS_MODULE,
6994 .set = &scx_kfunc_ids_cpu_release,
6997 __bpf_kfunc_start_defs();
7000 * scx_bpf_create_dsq - Create a custom DSQ
7001 * @dsq_id: DSQ to create
7002 * @node: NUMA node to allocate from
7004 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
7005 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
7007 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
7009 if (unlikely(node >= (int)nr_node_ids ||
7010 (node < 0 && node != NUMA_NO_NODE)))
7012 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
7015 __bpf_kfunc_end_defs();
7017 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
7018 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
7019 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
7020 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
7021 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
7022 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
7023 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
7024 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
7025 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
7026 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
7027 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
7029 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
7030 .owner = THIS_MODULE,
7031 .set = &scx_kfunc_ids_unlocked,
7034 __bpf_kfunc_start_defs();
7037 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
7039 * @flags: %SCX_KICK_* flags
7041 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
7042 * trigger rescheduling on a busy CPU. This can be called from any online
7043 * scx_ops operation and the actual kicking is performed asynchronously through
7046 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
7049 unsigned long irq_flags;
7051 if (!ops_cpu_valid(cpu, NULL))
7054 local_irq_save(irq_flags);
7056 this_rq = this_rq();
7059 * While bypassing for PM ops, IRQ handling may not be online which can
7060 * lead to irq_work_queue() malfunction such as infinite busy wait for
7061 * IRQ status update. Suppress kicking.
7063 if (scx_rq_bypassing(this_rq))
7067 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
7068 * rq locks. We can probably be smarter and avoid bouncing if called
7069 * from ops which don't hold a rq lock.
7071 if (flags & SCX_KICK_IDLE) {
7072 struct rq *target_rq = cpu_rq(cpu);
7074 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
7075 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
7077 if (raw_spin_rq_trylock(target_rq)) {
7078 if (can_skip_idle_kick(target_rq)) {
7079 raw_spin_rq_unlock(target_rq);
7082 raw_spin_rq_unlock(target_rq);
7084 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
7086 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
7088 if (flags & SCX_KICK_PREEMPT)
7089 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
7090 if (flags & SCX_KICK_WAIT)
7091 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
7094 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
7096 local_irq_restore(irq_flags);
7100 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
7101 * @dsq_id: id of the DSQ
7103 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
7104 * -%ENOENT is returned.
7106 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
7108 struct scx_dispatch_q *dsq;
7113 if (dsq_id == SCX_DSQ_LOCAL) {
7114 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
7116 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
7117 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
7119 if (ops_cpu_valid(cpu, NULL)) {
7120 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
7124 dsq = find_user_dsq(dsq_id);
7126 ret = READ_ONCE(dsq->nr);
7137 * scx_bpf_destroy_dsq - Destroy a custom DSQ
7138 * @dsq_id: DSQ to destroy
7140 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7141 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7142 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7143 * which doesn't exist. Can be called from any online scx_ops operations.
7145 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7147 destroy_dsq(dsq_id);
7151 * bpf_iter_scx_dsq_new - Create a DSQ iterator
7152 * @it: iterator to initialize
7153 * @dsq_id: DSQ to iterate
7154 * @flags: %SCX_DSQ_ITER_*
7156 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7157 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7158 * tasks which are already queued when this function is invoked.
7160 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7163 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7165 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7166 sizeof(struct bpf_iter_scx_dsq));
7167 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7168 __alignof__(struct bpf_iter_scx_dsq));
7170 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7173 kit->dsq = find_user_dsq(dsq_id);
7177 INIT_LIST_HEAD(&kit->cursor.node);
7178 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7179 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7185 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7186 * @it: iterator to progress
7188 * Return the next task. See bpf_iter_scx_dsq_new().
7190 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7192 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7193 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7194 struct task_struct *p;
7195 unsigned long flags;
7200 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7202 if (list_empty(&kit->cursor.node))
7205 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7208 * Only tasks which were queued before the iteration started are
7209 * visible. This bounds BPF iterations and guarantees that vtime never
7210 * jumps in the other direction while iterating.
7213 p = nldsq_next_task(kit->dsq, p, rev);
7214 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7218 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7220 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7222 list_del_init(&kit->cursor.node);
7225 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7231 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7232 * @it: iterator to destroy
7234 * Undo scx_iter_scx_dsq_new().
7236 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7238 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7243 if (!list_empty(&kit->cursor.node)) {
7244 unsigned long flags;
7246 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7247 list_del_init(&kit->cursor.node);
7248 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7253 __bpf_kfunc_end_defs();
7255 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7256 char *fmt, unsigned long long *data, u32 data__sz)
7258 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7261 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7262 (data__sz && !data)) {
7263 scx_ops_error("invalid data=%p and data__sz=%u",
7264 (void *)data, data__sz);
7268 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7270 scx_ops_error("failed to read data fields (%d)", ret);
7274 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7277 scx_ops_error("format preparation failed (%d)", ret);
7281 ret = bstr_printf(line_buf, line_size, fmt,
7282 bprintf_data.bin_args);
7283 bpf_bprintf_cleanup(&bprintf_data);
7285 scx_ops_error("(\"%s\", %p, %u) failed to format",
7286 fmt, data, data__sz);
7293 static s32 bstr_format(struct scx_bstr_buf *buf,
7294 char *fmt, unsigned long long *data, u32 data__sz)
7296 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7297 fmt, data, data__sz);
7300 __bpf_kfunc_start_defs();
7303 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7304 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7305 * @fmt: error message format string
7306 * @data: format string parameters packaged using ___bpf_fill() macro
7307 * @data__sz: @data len, must end in '__sz' for the verifier
7309 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7312 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7313 unsigned long long *data, u32 data__sz)
7315 unsigned long flags;
7317 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7318 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7319 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
7320 scx_exit_bstr_buf.line);
7321 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7325 * scx_bpf_error_bstr - Indicate fatal error
7326 * @fmt: error message format string
7327 * @data: format string parameters packaged using ___bpf_fill() macro
7328 * @data__sz: @data len, must end in '__sz' for the verifier
7330 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7333 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7336 unsigned long flags;
7338 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7339 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7340 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7341 scx_exit_bstr_buf.line);
7342 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7346 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7347 * @fmt: format string
7348 * @data: format string parameters packaged using ___bpf_fill() macro
7349 * @data__sz: @data len, must end in '__sz' for the verifier
7351 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7352 * dump_task() to generate extra debug dump specific to the BPF scheduler.
7354 * The extra dump may be multiple lines. A single line may be split over
7355 * multiple calls. The last line is automatically terminated.
7357 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7360 struct scx_dump_data *dd = &scx_dump_data;
7361 struct scx_bstr_buf *buf = &dd->buf;
7364 if (raw_smp_processor_id() != dd->cpu) {
7365 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7369 /* append the formatted string to the line buf */
7370 ret = __bstr_format(buf->data, buf->line + dd->cursor,
7371 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7373 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7374 dd->prefix, fmt, data, data__sz, ret);
7379 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7385 * If the line buf overflowed or ends in a newline, flush it into the
7386 * dump. This is to allow the caller to generate a single line over
7387 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7388 * the line buf, the only case which can lead to an unexpected
7389 * truncation is when the caller keeps generating newlines in the middle
7390 * instead of the end consecutively. Don't do that.
7392 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7397 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7398 * @cpu: CPU of interest
7400 * Return the maximum relative capacity of @cpu in relation to the most
7401 * performant CPU in the system. The return value is in the range [1,
7402 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7404 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7406 if (ops_cpu_valid(cpu, NULL))
7407 return arch_scale_cpu_capacity(cpu);
7409 return SCX_CPUPERF_ONE;
7413 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7414 * @cpu: CPU of interest
7416 * Return the current relative performance of @cpu in relation to its maximum.
7417 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7419 * The current performance level of a CPU in relation to the maximum performance
7420 * available in the system can be calculated as follows:
7422 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7424 * The result is in the range [1, %SCX_CPUPERF_ONE].
7426 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7428 if (ops_cpu_valid(cpu, NULL))
7429 return arch_scale_freq_capacity(cpu);
7431 return SCX_CPUPERF_ONE;
7435 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7436 * @cpu: CPU of interest
7437 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7439 * Set the target performance level of @cpu to @perf. @perf is in linear
7440 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7441 * schedutil cpufreq governor chooses the target frequency.
7443 * The actual performance level chosen, CPU grouping, and the overhead and
7444 * latency of the operations are dependent on the hardware and cpufreq driver in
7445 * use. Consult hardware and cpufreq documentation for more information. The
7446 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7448 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7450 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7451 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7455 if (ops_cpu_valid(cpu, NULL)) {
7456 struct rq *rq = cpu_rq(cpu);
7458 rq->scx.cpuperf_target = perf;
7460 rcu_read_lock_sched_notrace();
7461 cpufreq_update_util(cpu_rq(cpu), 0);
7462 rcu_read_unlock_sched_notrace();
7467 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7469 * All valid CPU IDs in the system are smaller than the returned value.
7471 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7477 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7479 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7481 return cpu_possible_mask;
7485 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7487 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7489 return cpu_online_mask;
7493 * scx_bpf_put_cpumask - Release a possible/online cpumask
7494 * @cpumask: cpumask to release
7496 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7499 * Empty function body because we aren't actually acquiring or releasing
7500 * a reference to a global cpumask, which is read-only in the caller and
7501 * is never released. The acquire / release semantics here are just used
7502 * to make the cpumask is a trusted pointer in the caller.
7507 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
7510 * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
7512 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
7514 if (!check_builtin_idle_enabled())
7515 return cpu_none_mask;
7518 return idle_masks.cpu;
7520 return cpu_none_mask;
7525 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
7526 * per-physical-core cpumask. Can be used to determine if an entire physical
7529 * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
7531 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
7533 if (!check_builtin_idle_enabled())
7534 return cpu_none_mask;
7537 if (sched_smt_active())
7538 return idle_masks.smt;
7540 return idle_masks.cpu;
7542 return cpu_none_mask;
7547 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7548 * either the percpu, or SMT idle-tracking cpumask.
7549 * @idle_mask: &cpumask to use
7551 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7554 * Empty function body because we aren't actually acquiring or releasing
7555 * a reference to a global idle cpumask, which is read-only in the
7556 * caller and is never released. The acquire / release semantics here
7557 * are just used to make the cpumask a trusted pointer in the caller.
7562 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7563 * @cpu: cpu to test and clear idle for
7565 * Returns %true if @cpu was idle and its idle state was successfully cleared.
7568 * Unavailable if ops.update_idle() is implemented and
7569 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7571 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7573 if (!check_builtin_idle_enabled())
7576 if (ops_cpu_valid(cpu, NULL))
7577 return test_and_clear_cpu_idle(cpu);
7583 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7584 * @cpus_allowed: Allowed cpumask
7585 * @flags: %SCX_PICK_IDLE_CPU_* flags
7587 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7588 * number on success. -%EBUSY if no matching cpu was found.
7590 * Idle CPU tracking may race against CPU scheduling state transitions. For
7591 * example, this function may return -%EBUSY as CPUs are transitioning into the
7592 * idle state. If the caller then assumes that there will be dispatch events on
7593 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7594 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7595 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7596 * event in the near future.
7598 * Unavailable if ops.update_idle() is implemented and
7599 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7601 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7604 if (!check_builtin_idle_enabled())
7607 return scx_pick_idle_cpu(cpus_allowed, flags);
7611 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7612 * @cpus_allowed: Allowed cpumask
7613 * @flags: %SCX_PICK_IDLE_CPU_* flags
7615 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7616 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
7617 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7620 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7621 * set, this function can't tell which CPUs are idle and will always pick any
7624 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7629 if (static_branch_likely(&scx_builtin_idle_enabled)) {
7630 cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7635 cpu = cpumask_any_distribute(cpus_allowed);
7636 if (cpu < nr_cpu_ids)
7643 * scx_bpf_task_running - Is task currently running?
7644 * @p: task of interest
7646 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7648 return task_rq(p)->curr == p;
7652 * scx_bpf_task_cpu - CPU a task is currently associated with
7653 * @p: task of interest
7655 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7661 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7662 * @cpu: CPU of the rq
7664 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7666 if (!ops_cpu_valid(cpu, NULL))
7673 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7674 * @p: task of interest
7676 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7677 * from the scheduler's POV. SCX operations should use this function to
7678 * determine @p's current cgroup as, unlike following @p->cgroups,
7679 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7680 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7681 * operations. The restriction guarantees that @p's rq is locked by the caller.
7683 #ifdef CONFIG_CGROUP_SCHED
7684 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7686 struct task_group *tg = p->sched_task_group;
7687 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7689 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7701 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7702 * clock for the current CPU. The clock returned is in nanoseconds.
7704 * It provides the following properties:
7706 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7707 * to account for execution time and track tasks' runtime properties.
7708 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7709 * eventually reads a hardware timestamp counter -- is neither performant nor
7710 * scalable. scx_bpf_now() aims to provide a high-performance clock by
7711 * using the rq clock in the scheduler core whenever possible.
7713 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7714 * scheduler use cases, the required clock resolution is lower than the most
7715 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7716 * uses the rq clock in the scheduler core whenever it is valid. It considers
7717 * that the rq clock is valid from the time the rq clock is updated
7718 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7720 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7721 * guarantees the clock never goes backward when comparing them in the same
7722 * CPU. On the other hand, when comparing clocks in different CPUs, there
7723 * is no such guarantee -- the clock can go backward. It provides a
7724 * monotonically *non-decreasing* clock so that it would provide the same
7725 * clock values in two different scx_bpf_now() calls in the same CPU
7726 * during the same period of when the rq clock is valid.
7728 __bpf_kfunc u64 scx_bpf_now(void)
7736 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7738 * If the rq clock is valid, use the cached rq clock.
7740 * Note that scx_bpf_now() is re-entrant between a process
7741 * context and an interrupt context (e.g., timer interrupt).
7742 * However, we don't need to consider the race between them
7743 * because such race is not observable from a caller.
7745 clock = READ_ONCE(rq->scx.clock);
7748 * Otherwise, return a fresh rq clock.
7750 * The rq clock is updated outside of the rq lock.
7751 * In this case, keep the updated rq clock invalid so the next
7752 * kfunc call outside the rq lock gets a fresh rq clock.
7754 clock = sched_clock_cpu(cpu_of(rq));
7762 __bpf_kfunc_end_defs();
7764 BTF_KFUNCS_START(scx_kfunc_ids_any)
7765 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7766 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7767 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7768 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7769 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7770 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7771 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7772 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7773 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7774 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7775 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7776 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7777 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7778 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7779 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7780 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7781 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7782 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7783 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7784 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7785 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7786 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7787 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7788 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7789 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7790 #ifdef CONFIG_CGROUP_SCHED
7791 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7793 BTF_ID_FLAGS(func, scx_bpf_now)
7794 BTF_KFUNCS_END(scx_kfunc_ids_any)
7796 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7797 .owner = THIS_MODULE,
7798 .set = &scx_kfunc_ids_any,
7801 static int __init scx_init(void)
7806 * kfunc registration can't be done from init_sched_ext_class() as
7807 * register_btf_kfunc_id_set() needs most of the system to be up.
7809 * Some kfuncs are context-sensitive and can only be called from
7810 * specific SCX ops. They are grouped into BTF sets accordingly.
7811 * Unfortunately, BPF currently doesn't have a way of enforcing such
7812 * restrictions. Eventually, the verifier should be able to enforce
7813 * them. For now, register them the same and make each kfunc explicitly
7814 * check using scx_kf_allowed().
7816 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7817 &scx_kfunc_set_select_cpu)) ||
7818 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7819 &scx_kfunc_set_enqueue_dispatch)) ||
7820 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7821 &scx_kfunc_set_dispatch)) ||
7822 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7823 &scx_kfunc_set_cpu_release)) ||
7824 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7825 &scx_kfunc_set_unlocked)) ||
7826 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7827 &scx_kfunc_set_unlocked)) ||
7828 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7829 &scx_kfunc_set_any)) ||
7830 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7831 &scx_kfunc_set_any)) ||
7832 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7833 &scx_kfunc_set_any))) {
7834 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7838 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7840 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7844 ret = register_pm_notifier(&scx_pm_notifier);
7846 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7850 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7852 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7856 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7858 pr_err("sched_ext: Failed to add global attributes\n");
7864 __initcall(scx_init);