// SPDX-License-Identifier: GPL-2.0-only
/*
 * Process number limiting controller for cgroups.
 *
 * Used to allow a cgroup hierarchy to stop any new processes from fork()ing
 * after a certain limit is reached.
 *
 * Since it is trivial to hit the task limit without hitting any kmemcg limits
 * in place, PIDs are a fundamental resource. As such, PID exhaustion must be
 * preventable in the scope of a cgroup hierarchy by allowing resource limiting
 * of the number of tasks in a cgroup.
 *
 * In order to use the `pids` controller, set the maximum number of tasks in
 * pids.max (this is not available in the root cgroup for obvious reasons). The
 * number of processes currently in the cgroup is given by pids.current.
 * Organisational operations are not blocked by cgroup policies, so it is
 * possible to have pids.current > pids.max. However, it is not possible to
 * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
 * would cause a cgroup policy to be violated.
 *
 * To set a cgroup to have no limit, set pids.max to "max". This is the default
 * for all new cgroups (N.B. that PID limits are hierarchical, so the most
 * stringent limit in the hierarchy is followed).
 *
 * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
 * a superset of parent/child/pids.current.
 */
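/*
 * Illustrative (non-kernel) sketch of the fork() behaviour described above:
 * a task already attached to a pids-limited cgroup sees the limit as an
 * ordinary fork() failure with errno set to EAGAIN. The cgroup path, mount
 * point and limit below are assumptions made for the example only; nothing
 * in this file defines them.
 *
 *	// Shell setup (assumed cgroup v2 mount at /sys/fs/cgroup):
 *	//   mkdir /sys/fs/cgroup/demo
 *	//   echo $$ > /sys/fs/cgroup/demo/cgroup.procs
 *	//   echo 2 > /sys/fs/cgroup/demo/pids.max
 *	// With the shell plus this program, pids.current is already 2, so the
 *	// fork() below would exceed pids.max and fails with EAGAIN.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();	// charged against pids.max by this controller
 *
 *		if (pid < 0 && errno == EAGAIN)
 *			printf("fork rejected by pids controller\n");
 *		else if (pid == 0)
 *			_exit(0);	// child: exit immediately
 *		return 0;
 *	}
 */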
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/task.h>
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"
struct pids_cgroup {
	struct cgroup_subsys_state	css;

	/*
	 * Use 64-bit types so that we can safely represent "max" as
	 * %PIDS_MAX = (%PID_MAX_LIMIT + 1).
	 */
	atomic64_t			counter;
	atomic64_t			limit;
	int64_t				watermark;

	/* Handle for "pids.events" */
	struct cgroup_file		events_file;

	/* Number of times fork failed because limit was hit. */
	atomic64_t			events_limit;
};
static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
	return container_of(css, struct pids_cgroup, css);
}
static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
	return css_pids(pids->css.parent);
}
static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
	struct pids_cgroup *pids;

	pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
	if (!pids)
		return ERR_PTR(-ENOMEM);

	atomic64_set(&pids->limit, PIDS_MAX);
	return &pids->css;
}
static void pids_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_pids(css));
}
static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids)
{
	/*
	 * This is racy, but we don't need perfectly accurate tallying of
	 * the watermark, and this lets us avoid extra atomic overhead.
	 */
	if (nr_pids > READ_ONCE(p->watermark))
		WRITE_ONCE(p->watermark, nr_pids);
}
/**
 * pids_cancel - uncharge the local pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to cancel
 *
 * This function will WARN if the pid count goes under 0, because such a case is
 * a bug in the pids controller proper.
 */
static void pids_cancel(struct pids_cgroup *pids, int num)
{
	/*
	 * A negative count (or overflow for that matter) is invalid,
	 * and indicates a bug in the `pids` controller proper.
	 */
	WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}
/**
 * pids_uncharge - hierarchically uncharge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to uncharge
 */
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
	struct pids_cgroup *p;

	for (p = pids; parent_pids(p); p = parent_pids(p))
		pids_cancel(p, num);
}
/**
 * pids_charge - hierarchically charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function does *not* follow the pid limit set. It cannot fail and the new
 * pid count may exceed the limit. This is only used for reverting failed
 * attaches, where there is no other way out than violating the limit.
 */
static void pids_charge(struct pids_cgroup *pids, int num)
{
	struct pids_cgroup *p;

	for (p = pids; parent_pids(p); p = parent_pids(p)) {
		int64_t new = atomic64_add_return(num, &p->counter);

		pids_update_watermark(p, new);
	}
}
/**
 * pids_try_charge - hierarchically try to charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function follows the set limit. It will fail if the charge would cause
 * the new value to exceed the hierarchical limit. Returns 0 if the charge
 * succeeded, otherwise -EAGAIN.
 */
static int pids_try_charge(struct pids_cgroup *pids, int num)
{
	struct pids_cgroup *p, *q;

	for (p = pids; parent_pids(p); p = parent_pids(p)) {
		int64_t new = atomic64_add_return(num, &p->counter);
		int64_t limit = atomic64_read(&p->limit);

		/*
		 * Since new is capped to the maximum number of pid_t, if
		 * p->limit is %PIDS_MAX then we know that this test will never
		 * fail.
		 */
		if (new > limit)
			goto revert;

		/*
		 * Not technically accurate if we go over limit somewhere up
		 * the hierarchy, but that's tolerable for the watermark.
		 */
		pids_update_watermark(p, new);
	}

	return 0;

revert:
	for (q = pids; q != p; q = parent_pids(q))
		pids_cancel(q, num);
	pids_cancel(p, num);

	return -EAGAIN;
}
static int pids_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;

	cgroup_taskset_for_each(task, dst_css, tset) {
		struct pids_cgroup *pids = css_pids(dst_css);
		struct cgroup_subsys_state *old_css;
		struct pids_cgroup *old_pids;

		/*
		 * No need to pin @old_css between here and cancel_attach()
		 * because cgroup core protects it from being freed before
		 * the migration completes or fails.
		 */
		old_css = task_css(task, pids_cgrp_id);
		old_pids = css_pids(old_css);

		pids_charge(pids, 1);
		pids_uncharge(old_pids, 1);
	}

	return 0;
}
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;

	cgroup_taskset_for_each(task, dst_css, tset) {
		struct pids_cgroup *pids = css_pids(dst_css);
		struct cgroup_subsys_state *old_css;
		struct pids_cgroup *old_pids;

		old_css = task_css(task, pids_cgrp_id);
		old_pids = css_pids(old_css);

		pids_charge(old_pids, 1);
		pids_uncharge(pids, 1);
	}
}
/*
 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
 * on cgroup_threadgroup_change_begin() held by the copy_process().
 */
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
	struct cgroup_subsys_state *css;
	struct pids_cgroup *pids;
	int err;

	if (cset)
		css = cset->subsys[pids_cgrp_id];
	else
		css = task_css_check(current, pids_cgrp_id, true);
	pids = css_pids(css);
	err = pids_try_charge(pids, 1);
	if (err) {
		/* Only log the first time events_limit is incremented. */
		if (atomic64_inc_return(&pids->events_limit) == 1) {
			pr_info("cgroup: fork rejected by pids controller in ");
			pr_cont_cgroup_path(css->cgroup);
			pr_cont("\n");
		}
		cgroup_file_notify(&pids->events_file);
	}
	return err;
}
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
	struct cgroup_subsys_state *css;
	struct pids_cgroup *pids;

	if (cset)
		css = cset->subsys[pids_cgrp_id];
	else
		css = task_css_check(current, pids_cgrp_id, true);
	pids = css_pids(css);
	pids_uncharge(pids, 1);
}
static void pids_release(struct task_struct *task)
{
	struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));

	pids_uncharge(pids, 1);
}
static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
			      size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct pids_cgroup *pids = css_pids(css);
	int64_t limit;
	int err;

	buf = strstrip(buf);
	if (!strcmp(buf, PIDS_MAX_STR)) {
		limit = PIDS_MAX;
	} else {
		err = kstrtoll(buf, 0, &limit);
		if (err)
			return err;
		if (limit < 0 || limit >= PIDS_MAX)
			return -EINVAL;
	}

	/*
	 * Limit updates don't need to be mutex'd, since it isn't
	 * critical that any racing fork()s follow the new limit.
	 */
	atomic64_set(&pids->limit, limit);
	return nbytes;
}
static int pids_max_show(struct seq_file *sf, void *v)
{
	struct cgroup_subsys_state *css = seq_css(sf);
	struct pids_cgroup *pids = css_pids(css);
	int64_t limit = atomic64_read(&pids->limit);

	if (limit >= PIDS_MAX)
		seq_printf(sf, "%s\n", PIDS_MAX_STR);
	else
		seq_printf(sf, "%lld\n", limit);

	return 0;
}
static s64 pids_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct pids_cgroup *pids = css_pids(css);

	return atomic64_read(&pids->counter);
}
static s64 pids_peak_read(struct cgroup_subsys_state *css,
			  struct cftype *cft)
{
	struct pids_cgroup *pids = css_pids(css);

	return READ_ONCE(pids->watermark);
}
static int pids_events_show(struct seq_file *sf, void *v)
{
	struct pids_cgroup *pids = css_pids(seq_css(sf));

	seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
	return 0;
}
static struct cftype pids_files[] = {
	{
		.name = "max",
		.write = pids_max_write,
		.seq_show = pids_max_show,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "current",
		.read_s64 = pids_current_read,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "peak",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = pids_peak_read,
	},
	{
		.name = "events",
		.seq_show = pids_events_show,
		.file_offset = offsetof(struct pids_cgroup, events_file),
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{ }	/* terminate */
};
struct cgroup_subsys pids_cgrp_subsys = {
	.css_alloc = pids_css_alloc,
	.css_free = pids_css_free,
	.can_attach = pids_can_attach,
	.cancel_attach = pids_cancel_attach,
	.can_fork = pids_can_fork,
	.cancel_fork = pids_cancel_fork,
	.release = pids_release,
	.legacy_cftypes = pids_files,
	.dfl_cftypes = pids_files,
	.threaded = true,
};