/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *      Thanks go out to Claus Fischer for some serious inspiration and
 *      for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *      Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory.  This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of the task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        struct task_struct *start = tsk;

        do {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant.  Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        if (mempolicy_nodemask_intersects(tsk, mask))
                                return true;
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        if (cpuset_mems_allowed_intersects(current, tsk))
                                return true;
                }
        } while_each_thread(start, tsk);

        return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t = p;

        do {
                task_lock(t);
                if (likely(t->mm))
                        return t;
                task_unlock(t);
        } while_each_thread(p, t);

        return NULL;
}
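
/*
 * The task_lock() held on return is what keeps the returned thread's ->mm
 * stable; callers may only dereference the mm until they drop the lock with
 * task_unlock().
 */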

/* return true if the task is not adequate as a candidate victim task */
static bool oom_unkillable_task(struct task_struct *p,
                const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() and p is not a member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that
         * each task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + p->mm->nr_ptes +
                 get_mm_counter(p->mm, MM_SWAPENTS);
        task_unlock(p);

        /*
         * Root processes get a 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                adj -= 30;

        /* Normalize to oom_score_adj units */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}
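
/*
 * A worked example of the score (illustrative numbers, assuming 4 KiB pages):
 * on a machine with 4 GiB of RAM and no swap, totalpages is ~1,048,576 pages.
 * A task with 1 GiB resident scores ~262,144 points (~25% of totalpages).
 * Since adj is multiplied by totalpages / 1000, each oom_score_adj unit is
 * worth ~0.1% of totalpages: an adj of 500 adds ~524,000 points, while the
 * root discount of 30 units subtracts ~31,000 points (~3%).
 */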

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        bool cpuset_limited = false;
        int nid;

        /* Default to all available memory */
        *totalpages = totalram_pages + total_swap_pages;

        if (!zonelist)
                return CONSTRAINT_NONE;
        /*
         * We reach here only when __GFP_NOFAIL is used, so we should avoid
         * killing current; a random task has to be killed in this case.
         * CONSTRAINT_THISNODE would be the right answer, but there is no way
         * to handle it yet.
         */
        if (gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect.  Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, *nodemask)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check whether this allocation failure is caused by the cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        high_zoneidx, nodemask)
                if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        *totalpages = totalram_pages + total_swap_pages;
        return CONSTRAINT_NONE;
}
#endif
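
/*
 * In short, the constraint determines what badness scores are measured
 * against: CONSTRAINT_NONE charges tasks against all RAM plus swap, while
 * CONSTRAINT_MEMORY_POLICY and CONSTRAINT_CPUSET charge them against swap
 * plus only the pages spanned by the allowed nodes, keeping scores
 * proportional to the memory the allocation could actually have used.
 */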

enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill)
{
        if (task->exit_state)
                return OOM_SCAN_CONTINUE;
        if (oom_unkillable_task(task, NULL, nodemask))
                return OOM_SCAN_CONTINUE;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves.
         */
        if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
                if (unlikely(frozen(task)))
                        __thaw_task(task);
                if (!force_kill)
                        return OOM_SCAN_ABORT;
        }
        if (!task->mm)
                return OOM_SCAN_CONTINUE;

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task))
                return OOM_SCAN_SELECT;

        if (task->flags & PF_EXITING && !force_kill) {
                /*
                 * If this task is not being ptraced on exit, then wait for it
                 * to finish before killing some other task unnecessarily.
                 */
                if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
                        return OOM_SCAN_ABORT;
        }
        return OOM_SCAN_OK;
}
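
/*
 * The returned oom_scan_t steers the selection loop below: OOM_SCAN_CONTINUE
 * skips the task, OOM_SCAN_ABORT stops the whole scan because a victim is
 * already on its way out, OOM_SCAN_SELECT picks the task unconditionally,
 * and OOM_SCAN_OK means the task should be scored with oom_badness().
 */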

/*
 * Simple selection loop.  We choose the process with the highest number of
 * 'points'.  Returns -1 on scan abort.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill)
{
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
        unsigned long chosen_points = 0;

        rcu_read_lock();
        do_each_thread(g, p) {
                unsigned int points;

                switch (oom_scan_process_thread(p, totalpages, nodemask,
                                                force_kill)) {
                case OOM_SCAN_SELECT:
                        chosen = p;
                        chosen_points = ULONG_MAX;
                        /* fall through */
                case OOM_SCAN_CONTINUE:
                        continue;
                case OOM_SCAN_ABORT:
                        rcu_read_unlock();
                        return (struct task_struct *)(-1UL);
                case OOM_SCAN_OK:
                        break;
                }
                points = oom_badness(p, NULL, nodemask, totalpages);
                if (points > chosen_points) {
                        chosen = p;
                        chosen_points = points;
                }
        } while_each_thread(g, p);
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();

        *ppoints = chosen_points * 1000 / totalpages;
        return chosen;
}
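
/*
 * Note that *ppoints is reported as chosen_points * 1000 / totalpages, i.e.
 * the raw page count rescaled to the same 0..1000 range (tenths of a percent
 * of allowed memory) that oom_score_adj operates in.
 */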

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's.  There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        task->mm->nr_ptes,
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
                        struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
                "oom_score_adj=%hd\n",
                current->comm, gfp_mask, order,
                current->signal->oom_score_adj);
        cpuset_print_task_mems_allowed(current);
        task_unlock(current);
        dump_stack();
        if (memcg)
                mem_cgroup_print_oom_info(memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES);
        if (sysctl_oom_dump_tasks)
                dump_tasks(memcg, nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
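/* K() converts a page count to kilobytes; with 4 KiB pages (PAGE_SHIFT == 12)
 * it is x << 2, i.e. x * 4. */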
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                      unsigned int points, unsigned long totalpages,
                      struct mem_cgroup *memcg, nodemask_t *nodemask,
                      const char *message)
{
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t = p;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly.
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
                put_task_struct(p);
                return;
        }

        if (__ratelimit(&oom_rs))
                dump_header(p, gfp_mask, order, memcg, nodemask);

        task_lock(p);
        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);
        task_unlock(p);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        do {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (child->mm == p->mm)
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child, memcg, nodemask,
                                                   totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        } while_each_thread(p, t);
        read_unlock(&tasklist_lock);

        rcu_read_lock();
        p = find_lock_task_mm(victim);
        if (!p) {
                rcu_read_unlock();
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* mm cannot safely be dereferenced after task_unlock(victim) */
        mm = victim->mm;
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups,
         * if any.  They don't get access to memory reserves, though, to avoid
         * depletion of all memory.  This prevents mm->mmap_sem livelock when
         * an oom killed thread cannot exit because it requires the semaphore
         * and it's contended by another thread trying to allocate memory
         * itself.  That thread will now get access to memory reserves since
         * it has a pending fatal signal.
         */
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
                        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
                                continue;

                        task_lock(p);   /* Protect ->comm from prctl() */
                        pr_err("Kill process %d (%s) sharing same memory\n",
                                task_pid_nr(p), p->comm);
                        task_unlock(p);
                        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
                }
        rcu_read_unlock();

        set_tsk_thread_flag(victim, TIF_MEMDIE);
        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        put_task_struct(victim);
}
#undef K
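
/*
 * Note the kill sequence above: the victim is flagged TIF_MEMDIE first, so
 * the page allocator will let it dip into memory reserves, and only then is
 * SIGKILL delivered with SEND_SIG_FORCED, which cannot be caught, blocked,
 * or ignored.  This lets the victim make forward progress toward exiting
 * even though memory is exhausted.
 */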

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                        int order, const nodemask_t *nodemask)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        dump_header(NULL, gfp_mask, order, NULL, nodemask);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
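
/*
 * In short: panic_on_oom == 0 never panics, == 1 panics only for a global
 * (CONSTRAINT_NONE) oom, and == 2 panics even for cpuset, mempolicy, or
 * memcg constrained failures.
 */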

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;
        int ret = 1;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                if (zone_is_oom_locked(zone)) {
                        ret = 0;
                        goto out;
                }
        }

        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                /*
                 * Lock each zone in the zonelist under zone_scan_lock so a
                 * parallel invocation of try_set_zonelist_oom() doesn't
                 * succeed when it shouldn't.
                 */
                zone_set_flag(zone, ZONE_OOM_LOCKED);
        }

out:
        spin_unlock(&zone_scan_lock);
        return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                zone_clear_flag(zone, ZONE_OOM_LOCKED);
        }
        spin_unlock(&zone_scan_lock);
}
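
/*
 * Typical usage of the pair above, sketched after the caller in
 * pagefault_out_of_memory() below (the page allocator's slow path follows
 * the same pattern):
 *
 *      if (try_set_zonelist_oom(zonelist, gfp_mask)) {
 *              out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 *              clear_zonelist_oom(zonelist, gfp_mask);
 *      }
 *
 * so at most one oom kill can be in flight for any overlapping set of zones.
 */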

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either killing a
 * random task (bad), letting the system crash (worse), or trying to be
 * smart about which process to kill.  Note that we don't have to be
 * perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask, bool force_kill)
{
        const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
        unsigned int uninitialized_var(points);
        enum oom_constraint constraint = CONSTRAINT_NONE;
        int killed = 0;

        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
                /* Got some memory back in the last second. */
                return;

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it.  The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         */
        if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
                set_thread_flag(TIF_MEMDIE);
                return;
        }

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA) that may require different handling.
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                       &totalpages);
        mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
        check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

        if (sysctl_oom_kill_allocating_task && current->mm &&
            !oom_unkillable_task(current, NULL, nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
                                 nodemask,
                                 "Out of memory (oom_kill_allocating_task)");
                goto out;
        }

        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                panic("Out of memory and no killable processes...\n");
        }
        if (p != (void *)-1UL) {
                oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                                 nodemask, "Out of memory");
                killed = 1;
        }
out:
        /*
         * Give the killed threads a good chance of exiting before trying to
         * allocate memory again.
         */
        if (killed)
                schedule_timeout_killable(1);
}
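
/*
 * To summarize the decision order above: (1) give oom notifiers a chance to
 * free memory; (2) if current is already dying, just give it reserve access;
 * (3) classify the constraint and honor panic_on_oom; (4) if
 * oom_kill_allocating_task is set, kill current when eligible; (5) otherwise
 * select the highest-scoring task and kill it, or panic if nothing at all is
 * killable.
 */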

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct zonelist *zonelist = node_zonelist(first_online_node,
                                                  GFP_KERNEL);

        if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
                out_of_memory(NULL, 0, 0, NULL, false);
                clear_zonelist_oom(zonelist, GFP_KERNEL);
        }
}