// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2022 Google LLC.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char _license[] SEC("license") = "GPL";

struct percpu_attach_counter {
        /* Previous percpu state, to figure out if we have new updates */
        __u64 prev;
        /* Current percpu state */
        __u64 state;
};

struct attach_counter {
        /* State propagated through children, pending aggregation */
        __u64 pending;
        /* Total state, including all cpus and all children */
        __u64 state;
};

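/*
 * Map pair implementing a scalable hierarchical counter: the percpu hash
 * stages updates locally, so the hot path only touches this CPU's copy,
 * while the plain hash holds the aggregated per-cgroup totals that a flush
 * folds the percpu deltas into, rstat-style.
 */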
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, 1024);
        __type(key, __u64);
        __type(value, struct percpu_attach_counter);
} percpu_attach_counters SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u64);
        __type(value, struct attach_counter);
} attach_counters SEC(".maps");

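/*
 * Kernel rstat API exposed to BPF as kfuncs: cgroup_rstat_updated() marks a
 * (cgroup, cpu) pair as having pending stat updates, and cgroup_rstat_flush()
 * walks the updated subtree, calling the bpf_rstat_flush hook for each
 * (cgroup, parent, cpu) with pending updates.
 */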
extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;

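/* Use the cgroup's 64-bit kernfs node id as a stable map key */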
static __u64 cgroup_id(struct cgroup *cgrp)
{
        return cgrp->kn->id;
}

static int create_percpu_attach_counter(__u64 cg_id, __u64 state)
{
        struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0};

        return bpf_map_update_elem(&percpu_attach_counters, &cg_id,
                                   &pcpu_init, BPF_NOEXIST);
}

static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending)
{
        struct attach_counter init = {.state = state, .pending = pending};

        return bpf_map_update_elem(&attach_counters, &cg_id,
                                   &init, BPF_NOEXIST);
}

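/*
 * Count task attaches: on every cgroup_attach_task() entry, bump this CPU's
 * counter for the destination cgroup, then tell rstat that this
 * (cgroup, cpu) pair has updates to flush later.
 */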
SEC("fentry/cgroup_attach_task")
int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader,
             bool threadgroup)
{
        __u64 cg_id = cgroup_id(dst_cgrp);
        struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem(
                        &percpu_attach_counters,
                        &cg_id);

        if (pcpu_counter)
                pcpu_counter->state += 1;
        else if (create_percpu_attach_counter(cg_id, 1))
                return 0;

        cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id());
        return 0;
}

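/*
 * Flush callback: invoked once per (cgroup, cpu) pair with pending updates
 * when an rstat flush runs. Folds this CPU's delta into the cgroup's total
 * and stages the delta as "pending" on the parent, so totals aggregate up
 * the hierarchy as the flush walks it.
 */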
SEC("fentry/bpf_rstat_flush")
int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
{
        struct percpu_attach_counter *pcpu_counter;
        struct attach_counter *total_counter, *parent_counter;
        __u64 cg_id = cgroup_id(cgrp);
        __u64 parent_cg_id = parent ? cgroup_id(parent) : 0;
        __u64 state;
        __u64 delta = 0;

        /* Add CPU changes on this level since the last flush */
        pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters,
                                                  &cg_id, cpu);
        if (pcpu_counter) {
                state = pcpu_counter->state;
                delta += state - pcpu_counter->prev;
                pcpu_counter->prev = state;
        }

        total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
        if (!total_counter) {
                if (create_attach_counter(cg_id, delta, 0))
                        return 0;
                goto update_parent;
        }

        /* Collect pending stats from subtree */
        if (total_counter->pending) {
                delta += total_counter->pending;
                total_counter->pending = 0;
        }

        /* Propagate changes to this cgroup's total */
        total_counter->state += delta;

update_parent:
        /* Skip if there are no changes to propagate, or no parent */
        if (!delta || !parent_cg_id)
                return 0;

        /* Propagate changes to cgroup's parent */
        parent_counter = bpf_map_lookup_elem(&attach_counters,
                                             &parent_cg_id);
        if (parent_counter)
                parent_counter->pending += delta;
        else
                create_attach_counter(parent_cg_id, 0, delta);
        return 0;
}

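/*
 * Cgroup iterator: emits one line per visited cgroup. The program is
 * sleepable ("iter.s") so that it may call cgroup_rstat_flush(), which can
 * sleep. The iterator makes a final call with a NULL cgroup; returning 1
 * there skips emitting output for it.
 */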
SEC("iter.s/cgroup")
int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
        struct seq_file *seq = meta->seq;
        struct attach_counter *total_counter;
        __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;

        /* Do nothing for the terminal call */
        if (!cg_id)
                return 1;

        /* Flush the stats to make sure we get the most updated numbers */
        cgroup_rstat_flush(cgrp);

        total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
        if (!total_counter) {
                BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n",
                               cg_id);
        } else {
                BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n",
                               cg_id, total_counter->state);
        }
        return 0;
}
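
/*
 * A minimal userspace sketch of how a test harness could consume this
 * program (the cgroup_fd variable is an assumption, not part of this file;
 * the skeleton names follow from this file's name): load the skeleton,
 * attach the fentry programs, then create a cgroup iterator link rooted at
 * a cgroup fd and read its seq_file output.
 *
 *      struct cgroup_hierarchical_stats *skel;
 *      union bpf_iter_link_info linfo = {};
 *      LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 *      struct bpf_link *link;
 *      int iter_fd;
 *      char buf[128];
 *
 *      skel = cgroup_hierarchical_stats__open_and_load();
 *      cgroup_hierarchical_stats__attach(skel);
 *
 *      linfo.cgroup.cgroup_fd = cgroup_fd;     // fd of the subtree root
 *      linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;
 *      opts.link_info = &linfo;
 *      opts.link_info_len = sizeof(linfo);
 *      link = bpf_program__attach_iter(skel->progs.dumper, &opts);
 *
 *      iter_fd = bpf_iter_create(bpf_link__fd(link));
 *      while (read(iter_fd, buf, sizeof(buf)) > 0)
 *              ;       // yields lines like "cg_id: 1234, attach_counter: 3"
 */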