// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
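
/*
 * These three knobs are exposed to userspace as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks
 * (the sysctl table wiring lives outside this file); e.g.
 * "sysctl vm.oom_dump_tasks=0" suppresses the dump_tasks() listing below.
 */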

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overly eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

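/*
 * The sysrq case above corresponds to the manual trigger: writing 'f' to
 * /proc/sysrq-trigger (wired up in drivers/tty/sysrq.c) invokes the oom
 * killer with order == -1.
 */
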
/* Return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than all user
 * memory (LRU pages). dump_unreclaimable_slab() helps in the case where the
 * oom is due to too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

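/*
 * Worked example of the arithmetic above (illustrative numbers only):
 * with totalpages = 1048576 (4GiB of 4KiB pages), a task using 262144
 * pages of rss + swap + pagetables with oom_score_adj = 500 scores
 * 262144 + 500 * (1048576 / 1000) = 262144 + 524000 = 786144, i.e. each
 * oom_score_adj unit biases the score by roughly 0.1% of total memory.
 */
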
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we would have to kill a random task in this case.
	 * CONSTRAINT_THISNODE would be the right answer, but there is no way
	 * to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by a cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * This is a kthread or all of p's threads have already
		 * detached their mm's.  There's no need to report
		 * them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

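/*
 * K() converts a page count to kibibytes: with 4KiB pages (PAGE_SHIFT == 12)
 * it is x << 2, so K(25) == 100, matching the "kB" figures printed below.
 */
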
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm, range.start, range.end);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb, range.start, range.end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb, range.start, range.end);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has been either reaped
	 * or somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

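/*
 * The retry loop above bounds the reap window: at most 10 attempts spaced
 * schedule_timeout_idle(HZ/10) apart, i.e. roughly one second in total,
 * before the mm is hidden from further oom kills via MMF_OOM_SKIP.
 */
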
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace
		 * so we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

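/*
 * In short: vm.panic_on_oom=1 panics only for a global (CONSTRAINT_NONE)
 * oom, while vm.panic_on_oom=2 panics for any constraint; neither panics
 * for a sysrq-triggered kill.
 */
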
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

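/*
 * Usage sketch for the notifier chain above (an illustrative, hypothetical
 * callback; my_oom_notify() and my_drop_caches() are not part of this file):
 * a driver caching reclaimable memory can hook the chain to drop its caches
 * before a victim is chosen. The chain is invoked from out_of_memory()
 * below with a pointer to an unsigned long; callbacks add the number of
 * pages they freed to it.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_drop_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */
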
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude a 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}