/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>

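/*
 * Controlled by the panic_on_oom sysctl: 0 (the default) means pick a
 * task to kill, 2 makes out_of_memory() panic unconditionally, and any
 * other non-zero value panics only when the failing allocation was not
 * constrained by a cpuset or memory policy.
 */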
int sysctl_panic_on_oom;
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
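	/*
	 * Dividing the badness by sqrt(cpu_time) and by the fourth root of
	 * run_time below makes long-running tasks that have consumed a lot
	 * of CPU time less likely to be selected.
	 */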
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
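	/*
	 * A positive oomkilladj shifts the score left (e.g. +2 quadruples
	 * it), a negative value shifts it right, and OOM_DISABLE exempts
	 * the task from selection entirely (see select_bad_process()).
	 */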
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Types of limitations to the nodes from which allocations may occur
 */
#define CONSTRAINT_NONE 1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET 3

/*
 * Determine the type of allocation constraint.
 */
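/*
 * On NUMA this works by collecting every online node that has memory and
 * clearing the ones whose zones in the zonelist the current cpuset allows.
 * A zone the cpuset does not allow means the allocation was constrained by
 * the cpuset; memory nodes the zonelist never covered mean it was
 * constrained by a memory policy; otherwise it was unconstrained.
 */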
static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes;
	int node;

	nodes_clear(nodes);
	/* node has memory ? */
	for_each_online_node(node)
		if (NODE_DATA(node)->node_present_pages)
			node_set(node, nodes);

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed_softwall(*z, gfp_mask))
			node_clear(zone_to_nid(*z), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_init(p))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/*
 * Send SIGKILL to the selected process irrespective of the CAP_SYS_RAWIO
 * flag, though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p). This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group. Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->tgid != p->tgid)
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, unsigned long points,
						const char *message)
{
	struct task_struct *c;
	struct list_head *tsk;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, p->pid, p->comm, points);

	/* Try to kill a child first */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
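		/*
		 * A child that shares the parent's mm owns no memory of its
		 * own, so killing it instead would not free anything; skip it.
		 */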
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

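/*
 * Callbacks on oom_notify_list run from out_of_memory() before a victim
 * is chosen; a callback that manages to free memory reports the number of
 * freed pages through its unsigned long argument, and the kill is skipped
 * for that attempt.
 */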
int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist of the failing allocation
 * @gfp_mask: allocation flags of the failing allocation
 * @order: order of the failing allocation
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	int constraint;

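	/*
	 * Give the registered OOM notifiers a chance to free memory before
	 * killing anything; any pages they release are reported via 'freed'.
	 */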
	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
	}

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	cpuset_lock();
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, points,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_CPUSET:
		oom_kill_process(current, points,
				"No available memory in cpuset");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			cpuset_unlock();
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, points, "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);
	cpuset_unlock();

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}