1 /* SPDX-License-Identifier: GPL-2.0 */
3 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
4 * hierarchical weight-based cgroup CPU control by flattening the cgroup
5 * hierarchy into a single layer by compounding the active weight share at each
6 * level. Consider the following hierarchy with weights in parentheses:
8 * R + A (100) + B (100)
9 *   |          \ C (100)
10 *   \ D (200)
12 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
13 * Let's say all three have runnable tasks. The total share that each of these
14 * three cgroups is entitled to can be calculated by compounding its share at
15 * each level.
17 * For example, B is competing against C and in that competition its share is
18 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
19 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
20 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
21 * eventual share is the same at 1/6. D is only competing at the top level and
22 * its share is 200/(100+200) == 2/3.
24 * So, instead of hierarchically scheduling level-by-level, we can consider it
25 * as B, C and D competing with each other with respective shares of 1/6, 1/6 and 2/3
26 * and keep updating the eventual shares as the cgroups' runnable states change.
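 *
 * For instance, continuing the example above, if C goes idle, B becomes A's
 * only active child, so B's share within A rises to 100/100 == 1 and its
 * flattened share becomes 1 * 1/3 == 1/3 while D keeps its 2/3. Together they
 * still account for the whole machine, 1/3 + 2/3 == 1.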
28 * This flattening of hierarchy can bring a substantial performance gain when
29 * the cgroup hierarchy is nested multiple levels deep. In a simple benchmark using
30 * wrk[8] on apache serving a CGI script calculating sha1sum of a small file, it
31 * outperforms CFS by ~3% with CPU controller disabled and by ~10% with two
32 * apache instances competing with a 2:1 weight ratio nested four levels deep.
34 * However, the gain comes at the cost of not being able to properly handle
35 * a thundering herd of cgroups. For example, if many cgroups which are nested
36 * behind a low priority parent cgroup wake up around the same time, they may be
37 * able to consume more CPU cycles than they are entitled to. In many use cases,
38 * this isn't a real concern, especially given the performance gain. Also, there
39 * are ways to mitigate the problem further by e.g. introducing an extra
40 * scheduling layer on cgroup delegation boundaries.
42 * The scheduler first picks the cgroup to run and then schedules the tasks
43 * within it using nested weighted vtime scheduling by default. The
44 * cgroup-internal scheduling can be switched to FIFO with the -f option.
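 *
 * Concretely, in the code below each cgroup gets its own DSQ keyed by its
 * cgroup ID (created in fcg_cgroup_init()), runnable cgroups are kept on the
 * cgv_tree rbtree ordered by their compounded vtime (cvtime), and
 * fcg_dispatch() pops the front cgroup and consumes from its DSQ for up to
 * cgrp_slice_ns before moving on to the next one. Tasks with restricted
 * affinities bypass the per-cgroup DSQs through local dispatch or the
 * fallback DSQ and are force charged afterwards.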
46 #include <scx/common.bpf.h>
47 #include "scx_flatcg.h"
50 * Maximum number of retries to find a valid cgroup.
54 CGROUP_MAX_RETRIES = 1024,
57 char _license[] SEC("license") = "GPL";
59 const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */
60 const volatile u64 cgrp_slice_ns;
61 const volatile bool fifo_sched;
67 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
70 __uint(max_entries, FCG_NR_STATS);
73 static void stat_inc(enum fcg_stat_idx idx)
77 u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
88 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
90 __type(value, struct fcg_cpu_ctx);
91 __uint(max_entries, 1);
92 } cpu_ctx SEC(".maps");
95 __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
96 __uint(map_flags, BPF_F_NO_PREALLOC);
98 __type(value, struct fcg_cgrp_ctx);
99 } cgrp_ctx SEC(".maps");
102 struct bpf_rb_node rb_node;
107 private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
108 private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);
110 struct cgv_node_stash {
111 struct cgv_node __kptr *node;
115 __uint(type, BPF_MAP_TYPE_HASH);
116 __uint(max_entries, 16384);
118 __type(value, struct cgv_node_stash);
119 } cgv_node_stash SEC(".maps");
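/*
 * Each cgroup owns exactly one cgv_node. While the cgroup is queued for
 * execution, the node lives on cgv_tree; otherwise it is parked in
 * cgv_node_stash and moved back and forth with bpf_kptr_xchg() (see
 * cgrp_enqueued() and try_pick_next_cgroup()).
 */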
121 struct fcg_task_ctx {
126 __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
127 __uint(map_flags, BPF_F_NO_PREALLOC);
129 __type(value, struct fcg_task_ctx);
130 } task_ctx SEC(".maps");
132 /* gets inc'd on weight tree changes to expire the cached hweights */
135 static u64 div_round_up(u64 dividend, u64 divisor)
137 return (dividend + divisor - 1) / divisor;
140 static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
142 struct cgv_node *cgc_a, *cgc_b;
144 cgc_a = container_of(a, struct cgv_node, rb_node);
145 cgc_b = container_of(b, struct cgv_node, rb_node);
147 return cgc_a->cvtime < cgc_b->cvtime;
150 static struct fcg_cpu_ctx *find_cpu_ctx(void)
152 struct fcg_cpu_ctx *cpuc;
155 cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
157 scx_bpf_error("cpu_ctx lookup failed");
163 static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
165 struct fcg_cgrp_ctx *cgc;
167 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
169 scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
175 static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
177 struct fcg_cgrp_ctx *cgc;
179 cgrp = bpf_cgroup_ancestor(cgrp, level);
181 scx_bpf_error("ancestor cgroup lookup failed");
185 cgc = find_cgrp_ctx(cgrp);
187 scx_bpf_error("ancestor cgrp_ctx lookup failed");
188 bpf_cgroup_release(cgrp);
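/*
 * Recompute @cgrp's hierarchical weight by walking from the root towards
 * @cgrp and compounding, at each level, the parent's hweight with the
 * cgroup's share of the active children's weights:
 *
 *	hweight = parent_hweight * weight / parent_child_weight_sum
 *
 * The result is cached and only recomputed when hweight_gen changes.
 */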
192 static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
196 if (!cgc->nr_active) {
197 stat_inc(FCG_STAT_HWT_SKIP);
201 if (cgc->hweight_gen == hweight_gen) {
202 stat_inc(FCG_STAT_HWT_CACHE);
206 stat_inc(FCG_STAT_HWT_UPDATES);
207 bpf_for(level, 0, cgrp->level + 1) {
208 struct fcg_cgrp_ctx *cgc;
211 cgc = find_ancestor_cgrp_ctx(cgrp, level);
216 cgc->hweight = FCG_HWEIGHT_ONE;
217 cgc->hweight_gen = hweight_gen;
219 struct fcg_cgrp_ctx *pcgc;
221 pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
226 * We can be opportunistic here and not grab the
227 * cgv_tree_lock and deal with the occasional races.
228 * However, hweight updates are already cached and
229 * relatively low-frequency. Let's just do the
230 * straightforward thing.
232 bpf_spin_lock(&cgv_tree_lock);
233 is_active = cgc->nr_active;
235 cgc->hweight_gen = pcgc->hweight_gen;
236 cgc->hweight =
237 div_round_up(pcgc->hweight * cgc->weight,
238 pcgc->child_weight_sum);
240 bpf_spin_unlock(&cgv_tree_lock);
243 stat_inc(FCG_STAT_HWT_RACE);
250 static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
252 u64 delta, cvtime, max_budget;
255 * A node which is on the rbtree can't be pointed to from elsewhere yet
256 * and thus can't be updated and repositioned. Instead, we collect the
257 * vtime deltas separately and apply them asynchronously here.
259 delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
260 cvtime = cgv_node->cvtime + delta;
263 * Allow a cgroup to carry a maximum budget proportional to its hweight
264 * such that a full-hweight cgroup can immediately take up at most half
265 * of the CPUs while staying at the front of the rbtree.
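 *
 * For example, with nr_cpus == 8 and hweight == FCG_HWEIGHT_ONE, this works
 * out to a budget of 4 * cgrp_slice_ns.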
267 max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
268 (2 * FCG_HWEIGHT_ONE);
269 if (time_before(cvtime, cvtime_now - max_budget))
270 cvtime = cvtime_now - max_budget;
272 cgv_node->cvtime = cvtime;
275 static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
277 struct cgv_node_stash *stash;
278 struct cgv_node *cgv_node;
279 u64 cgid = cgrp->kn->id;
281 /* paired with cmpxchg in try_pick_next_cgroup() */
282 if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
283 stat_inc(FCG_STAT_ENQ_SKIP);
287 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
289 scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
293 /* NULL if the node is already on the rbtree */
294 cgv_node = bpf_kptr_xchg(&stash->node, NULL);
296 stat_inc(FCG_STAT_ENQ_RACE);
300 bpf_spin_lock(&cgv_tree_lock);
301 cgrp_cap_budget(cgv_node, cgc);
302 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
303 bpf_spin_unlock(&cgv_tree_lock);
306 static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
309 * Tell fcg_stopping() that this bypassed the regular scheduling path
310 * and should be force charged to the cgroup. 0 is used to indicate that
311 * the task isn't bypassing, so if the current runtime is 0, go back by
312 * one nsec.
314 taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
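/*
 * Tasks that are dispatched directly (idle-CPU wakeups and restricted
 * affinities) skip the per-cgroup DSQs, so their CPU time isn't charged
 * through the normal vtime path. fcg_stopping() uses ->bypassed_at to
 * charge that time to the cgroup's cvtime_delta after the fact.
 */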
317 s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
319 struct fcg_task_ctx *taskc;
320 bool is_idle = false;
323 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
325 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
327 scx_bpf_error("task_ctx lookup failed");
332 * If select_cpu_dfl() is recommending local enqueue, the target CPU is
333 * idle. Follow it and charge the cgroup later in fcg_stopping() after
334 * the fact.
337 set_bypassed_at(p, taskc);
338 stat_inc(FCG_STAT_LOCAL);
339 scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
345 void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
347 struct fcg_task_ctx *taskc;
349 struct fcg_cgrp_ctx *cgc;
351 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
353 scx_bpf_error("task_ctx lookup failed");
358 * Use direct dispatching and force charging to deal with tasks with
359 * custom affinities so that we don't have to worry about per-cgroup
360 * dq's containing tasks that can't be executed from some CPUs.
362 if (p->nr_cpus_allowed != nr_cpus) {
363 set_bypassed_at(p, taskc);
366 * The global dq is deprioritized as we don't want to let tasks
367 * boost themselves by constraining their cpumasks. The
368 * deprioritization is rather severe, so let's not apply it to
369 * per-cpu kernel threads. This is ham-fisted. We probably want to
370 * implement per-cgroup fallback dq's instead so that we have
371 * more control over when tasks with custom cpumasks get issued.
373 if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
374 stat_inc(FCG_STAT_LOCAL);
375 scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
378 stat_inc(FCG_STAT_GLOBAL);
379 scx_bpf_dsq_insert(p, FALLBACK_DSQ, SCX_SLICE_DFL,
385 cgrp = __COMPAT_scx_bpf_task_cgroup(p);
386 cgc = find_cgrp_ctx(cgrp);
391 scx_bpf_dsq_insert(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
393 u64 tvtime = p->scx.dsq_vtime;
396 * Limit the amount of budget that an idling task can accumulate
397 * to one slice.
399 if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
400 tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
402 scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
406 cgrp_enqueued(cgrp, cgc);
408 bpf_cgroup_release(cgrp);
412 * Walk the cgroup tree to update the active weight sums as tasks wake up and
413 * sleep. The weight sums are used as the base when calculating the proportion a
414 * given cgroup or task is entitled to at each level.
416 static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
418 struct fcg_cgrp_ctx *cgc;
419 bool updated = false;
422 cgc = find_cgrp_ctx(cgrp);
427 * In most cases, a hot cgroup would have multiple threads going to
428 * sleep and waking up while the whole cgroup stays active. In leaf
429 * cgroups, ->nr_runnable, which is updated with __sync operations, gates
430 * ->nr_active updates, so that we don't have to grab the cgv_tree_lock
431 * repeatedly for a busy cgroup which is staying active.
434 if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
436 stat_inc(FCG_STAT_ACT);
438 if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
440 stat_inc(FCG_STAT_DEACT);
444 * If @cgrp is becoming runnable, its hweight should be refreshed after
445 * it's added to the weight tree so that enqueue has the up-to-date
446 * value. If @cgrp is becoming quiescent, the hweight should be
447 * refreshed before it's removed from the weight tree so that the usage
448 * charging which happens afterwards has access to the latest value.
451 cgrp_refresh_hweight(cgrp, cgc);
453 /* propagate upwards */
454 bpf_for(idx, 0, cgrp->level) {
455 int level = cgrp->level - idx;
456 struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
457 bool propagate = false;
459 cgc = find_ancestor_cgrp_ctx(cgrp, level);
463 pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
469 * We need the propagation protected by a lock to synchronize
470 * against weight changes. There's no reason to drop the lock at
471 * each level but bpf_spin_lock() doesn't want any function
472 * calls while locked.
474 bpf_spin_lock(&cgv_tree_lock);
477 if (!cgc->nr_active++) {
481 pcgc->child_weight_sum += cgc->weight;
485 if (!--cgc->nr_active) {
489 pcgc->child_weight_sum -= cgc->weight;
494 bpf_spin_unlock(&cgv_tree_lock);
501 __sync_fetch_and_add(&hweight_gen, 1);
504 cgrp_refresh_hweight(cgrp, cgc);
507 void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
511 cgrp = __COMPAT_scx_bpf_task_cgroup(p);
512 update_active_weight_sums(cgrp, true);
513 bpf_cgroup_release(cgrp);
516 void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
519 struct fcg_cgrp_ctx *cgc;
524 cgrp = __COMPAT_scx_bpf_task_cgroup(p);
525 cgc = find_cgrp_ctx(cgrp);
528 * @cgc->tvtime_now always progresses forward as tasks start
529 * executing. The test and update can be performed concurrently
530 * from multiple CPUs and thus racy. Any error should be
531 * contained and temporary. Let's just live with it.
533 if (time_before(cgc->tvtime_now, p->scx.dsq_vtime))
534 cgc->tvtime_now = p->scx.dsq_vtime;
536 bpf_cgroup_release(cgrp);
539 void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
541 struct fcg_task_ctx *taskc;
543 struct fcg_cgrp_ctx *cgc;
546 * Scale the execution time by the inverse of the weight and charge.
548 * Note that the default yield implementation yields by setting
549 * @p->scx.slice to zero and the following would treat the yielding task
550 * as if it had consumed its entire slice. If this penalizes yielding tasks
551 * too much, determine the execution time by taking explicit timestamps
552 * instead of depending on @p->scx.slice.
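 *
 * With the scaling below, a task that used its full SCX_SLICE_DFL advances
 * its vtime by SCX_SLICE_DFL * 100 / weight, i.e. a weight-200 task ages at
 * half the rate of a default weight-100 task.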
554 if (!fifo_sched)
555 p->scx.dsq_vtime +=
556 (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
558 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
560 scx_bpf_error("task_ctx lookup failed");
564 if (!taskc->bypassed_at)
567 cgrp = __COMPAT_scx_bpf_task_cgroup(p);
568 cgc = find_cgrp_ctx(cgrp);
570 __sync_fetch_and_add(&cgc->cvtime_delta,
571 p->se.sum_exec_runtime - taskc->bypassed_at);
572 taskc->bypassed_at = 0;
574 bpf_cgroup_release(cgrp);
577 void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
581 cgrp = __COMPAT_scx_bpf_task_cgroup(p);
582 update_active_weight_sums(cgrp, false);
583 bpf_cgroup_release(cgrp);
586 void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
588 struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
590 cgc = find_cgrp_ctx(cgrp);
595 pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
600 bpf_spin_lock(&cgv_tree_lock);
601 if (pcgc && cgc->nr_active)
602 pcgc->child_weight_sum += (s64)weight - cgc->weight;
603 cgc->weight = weight;
604 bpf_spin_unlock(&cgv_tree_lock);
607 static bool try_pick_next_cgroup(u64 *cgidp)
609 struct bpf_rb_node *rb_node;
610 struct cgv_node_stash *stash;
611 struct cgv_node *cgv_node;
612 struct fcg_cgrp_ctx *cgc;
616 /* pop the front cgroup and wind cvtime_now accordingly */
617 bpf_spin_lock(&cgv_tree_lock);
619 rb_node = bpf_rbtree_first(&cgv_tree);
621 bpf_spin_unlock(&cgv_tree_lock);
622 stat_inc(FCG_STAT_PNC_NO_CGRP);
627 rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
628 bpf_spin_unlock(&cgv_tree_lock);
632 * This should never happen. bpf_rbtree_first() was called
633 * above while the tree lock was held, so the node should
634 * always be removable.
636 scx_bpf_error("node could not be removed");
640 cgv_node = container_of(rb_node, struct cgv_node, rb_node);
641 cgid = cgv_node->cgid;
643 if (time_before(cvtime_now, cgv_node->cvtime))
644 cvtime_now = cgv_node->cvtime;
647 * If lookup fails, the cgroup's gone. Free and move on. See
648 * fcg_cgroup_exit().
650 cgrp = bpf_cgroup_from_id(cgid);
652 stat_inc(FCG_STAT_PNC_GONE);
656 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
658 bpf_cgroup_release(cgrp);
659 stat_inc(FCG_STAT_PNC_GONE);
663 if (!scx_bpf_dsq_move_to_local(cgid)) {
664 bpf_cgroup_release(cgrp);
665 stat_inc(FCG_STAT_PNC_EMPTY);
670 * Successfully consumed from the cgroup. This will be our current
671 * cgroup for the new slice. Refresh its hweight.
673 cgrp_refresh_hweight(cgrp, cgc);
675 bpf_cgroup_release(cgrp);
678 * As the cgroup may have more tasks, add it back to the rbtree. Note
679 * that here we charge the full slice upfront and then exact later
680 * according to the actual consumption. This prevents lowpri thundering
681 * herd from saturating the machine.
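 *
 * The charge below is scaled by the inverse of the cgroup's hweight: a
 * cgroup entitled to half of the machine pays twice as much cvtime per
 * slice as a full-hweight cgroup, which is what spaces cgroups apart on
 * the rbtree in proportion to their shares.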
683 bpf_spin_lock(&cgv_tree_lock);
684 cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
685 cgrp_cap_budget(cgv_node, cgc);
686 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
687 bpf_spin_unlock(&cgv_tree_lock);
690 stat_inc(FCG_STAT_PNC_NEXT);
694 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
696 stat_inc(FCG_STAT_PNC_GONE);
701 * Paired with cmpxchg in cgrp_enqueued(). If they see the following
702 * transition, they'll enqueue the cgroup. If they are earlier, we'll
703 * see their task in the dq below and requeue the cgroup.
705 __sync_val_compare_and_swap(&cgc->queued, 1, 0);
707 if (scx_bpf_dsq_nr_queued(cgid)) {
708 bpf_spin_lock(&cgv_tree_lock);
709 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
710 bpf_spin_unlock(&cgv_tree_lock);
711 stat_inc(FCG_STAT_PNC_RACE);
713 cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
715 scx_bpf_error("unexpected !NULL cgv_node stash");
723 bpf_obj_drop(cgv_node);
727 void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
729 struct fcg_cpu_ctx *cpuc;
730 struct fcg_cgrp_ctx *cgc;
732 u64 now = scx_bpf_now();
733 bool picked_next = false;
735 cpuc = find_cpu_ctx();
740 goto pick_next_cgroup;
742 if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) {
743 if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
744 stat_inc(FCG_STAT_CNS_KEEP);
747 stat_inc(FCG_STAT_CNS_EMPTY);
749 stat_inc(FCG_STAT_CNS_EXPIRE);
753 * The current cgroup is expiring. It was already charged a full slice.
754 * Calculate the actual usage and accumulate the delta.
756 cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
758 stat_inc(FCG_STAT_CNS_GONE);
759 goto pick_next_cgroup;
762 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
765 * We want to update the vtime delta and then look for the next
766 * cgroup to execute but the latter needs to be done in a loop
767 * and we can't keep the lock held. Oh well...
769 bpf_spin_lock(&cgv_tree_lock);
770 __sync_fetch_and_add(&cgc->cvtime_delta,
771 (cpuc->cur_at + cgrp_slice_ns - now) *
772 FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
773 bpf_spin_unlock(&cgv_tree_lock);
775 stat_inc(FCG_STAT_CNS_GONE);
778 bpf_cgroup_release(cgrp);
783 if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) {
788 bpf_repeat(CGROUP_MAX_RETRIES) {
789 if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
796 * This only happens if try_pick_next_cgroup() races against the enqueue
797 * path more than CGROUP_MAX_RETRIES times, which is extremely unlikely
798 * and, if it does happen, likely indicates an underlying bug. There
799 * shouldn't be any stall risk as the race is against enqueue.
802 stat_inc(FCG_STAT_PNC_FAIL);
805 s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
806 struct scx_init_task_args *args)
808 struct fcg_task_ctx *taskc;
809 struct fcg_cgrp_ctx *cgc;
812 * @p is new. Let's ensure that its task_ctx is available. We can sleep
813 * in this function and the following will automatically use GFP_KERNEL.
815 taskc = bpf_task_storage_get(&task_ctx, p, 0,
816 BPF_LOCAL_STORAGE_GET_F_CREATE);
820 taskc->bypassed_at = 0;
822 if (!(cgc = find_cgrp_ctx(args->cgroup)))
825 p->scx.dsq_vtime = cgc->tvtime_now;
830 int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
831 struct scx_cgroup_init_args *args)
833 struct fcg_cgrp_ctx *cgc;
834 struct cgv_node *cgv_node;
835 struct cgv_node_stash empty_stash = {}, *stash;
836 u64 cgid = cgrp->kn->id;
840 * Technically incorrect as cgroup IDs are full 64-bit while dsq IDs are
841 * 63-bit. Should not be a problem in practice and easy to spot in the
842 * unlikely case that it breaks.
844 ret = scx_bpf_create_dsq(cgid, -1);
848 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
849 BPF_LOCAL_STORAGE_GET_F_CREATE);
852 goto err_destroy_dsq;
855 cgc->weight = args->weight;
856 cgc->hweight = FCG_HWEIGHT_ONE;
858 ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
862 scx_bpf_error("unexpected stash creation error (%d)",
864 goto err_destroy_dsq;
867 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
869 scx_bpf_error("unexpected cgv_node stash lookup failure");
871 goto err_destroy_dsq;
874 cgv_node = bpf_obj_new(struct cgv_node);
877 goto err_del_cgv_node;
880 cgv_node->cgid = cgid;
881 cgv_node->cvtime = cvtime_now;
883 cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
885 scx_bpf_error("unexpected !NULL cgv_node stash");
893 bpf_obj_drop(cgv_node);
895 bpf_map_delete_elem(&cgv_node_stash, &cgid);
897 scx_bpf_destroy_dsq(cgid);
901 void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
903 u64 cgid = cgrp->kn->id;
906 * For now, there's no way to find and remove the cgv_node if it's on the
907 * cgv_tree. Let's drain them in the dispatch path as they get popped
908 * off the front of the tree.
910 bpf_map_delete_elem(&cgv_node_stash, &cgid);
911 scx_bpf_destroy_dsq(cgid);
914 void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
915 struct cgroup *from, struct cgroup *to)
917 struct fcg_cgrp_ctx *from_cgc, *to_cgc;
920 /* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
921 if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
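	/*
	 * Keep the task's relative position: carry over how far ahead of the
	 * old cgroup's tvtime_now it was and re-apply that offset on top of
	 * the new cgroup's tvtime_now.
	 */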
924 delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
925 p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
928 s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
930 return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
933 void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
938 SCX_OPS_DEFINE(flatcg_ops,
939 .select_cpu = (void *)fcg_select_cpu,
940 .enqueue = (void *)fcg_enqueue,
941 .dispatch = (void *)fcg_dispatch,
942 .runnable = (void *)fcg_runnable,
943 .running = (void *)fcg_running,
944 .stopping = (void *)fcg_stopping,
945 .quiescent = (void *)fcg_quiescent,
946 .init_task = (void *)fcg_init_task,
947 .cgroup_set_weight = (void *)fcg_cgroup_set_weight,
948 .cgroup_init = (void *)fcg_cgroup_init,
949 .cgroup_exit = (void *)fcg_cgroup_exit,
950 .cgroup_move = (void *)fcg_cgroup_move,
951 .init = (void *)fcg_init,
952 .exit = (void *)fcg_exit,
953 .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,