// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

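/*
 * Detach @p from the pid hashes and the global task/thread lists.
 * Called from __exit_signal() with tasklist_lock write-held and
 * @p->sighand->siglock held.
 */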
static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_TGID);
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
        list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *tty;
        u64 utime, stime;

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
        posix_cpu_timers_exit(tsk);
        if (group_dead)
                posix_cpu_timers_exit_group(tsk);
#endif

        if (group_dead) {
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exec_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
        }

        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                              sizeof(unsigned long long));

        /*
         * Accumulate here the counters for all threads as they die. We could
         * skip the group leader because it is the last user of signal_struct,
         * but we want to avoid the race with thread_group_cputime() which can
         * see the empty ->thread_head list.
         */
        task_cputime(tsk, &utime, &stime);
        write_seqlock(&sig->stats_lock);
        sig->utime += utime;
        sig->stime += stime;
        sig->gtime += task_gtime(tsk);
        sig->min_flt += tsk->min_flt;
        sig->maj_flt += tsk->maj_flt;
        sig->nvcsw += tsk->nvcsw;
        sig->nivcsw += tsk->nivcsw;
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        sig->nr_threads--;
        __unhash_process(tsk, group_dead);
        write_sequnlock(&sig->stats_lock);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}

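/*
 * RCU callback, queued once the last rcu_users reference is dropped:
 * after the grace period no RCU reader can still observe the task, so
 * flush per-task kprobe/rethook state and drop a task_struct reference.
 */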
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        kprobe_flush_task(tsk);
        rethook_flush_task(tsk);
        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
        if (refcount_dec_and_test(&task->rcu_users))
                call_rcu(&task->rcu, delayed_put_task_struct);
}

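/*
 * Reap an already-dead task: drop its NPROC ucount, release its cgroup
 * and ptrace state, unhash it from the pid/task lists under
 * tasklist_lock, and drop an rcu_users reference.  If this removes the
 * last non-leader thread while the group leader is a zombie whose
 * parent ignores SIGCHLD, the leader is reaped as well (the repeat
 * loop below).
 */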
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        struct pid *thread_pid;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
        rcu_read_unlock();

        cgroup_release(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        thread_pid = get_pid(p->thread_pid);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process (if it wants notification).
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader)
                        && leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        seccomp_filter_release(p);
        proc_flush_pid(thread_pid);
        put_pid(thread_pid);
        release_thread(p);
        put_task_struct_rcu_user(p);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

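/*
 * Wake the single task, if any, currently blocked in
 * rcuwait_wait_event() on @w.  A minimal usage sketch (the "done" flag
 * is illustrative, not part of this file):
 *
 *	struct rcuwait w;
 *
 *	rcuwait_init(&w);
 *
 *	waiter:
 *		rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
 *	waker:
 *		WRITE_ONCE(done, true);
 *		rcuwait_wake_up(&w);
 */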
int rcuwait_wake_up(struct rcuwait *w)
{
        int ret = 0;
        struct task_struct *task;

        rcu_read_lock();

        /*
         * Order condition vs @task, such that everything prior to the load
         * of @task is visible. This is the condition as to why the user called
         * rcuwait_wake_up() in the first place. Pairs with set_current_state()
         * barrier (A) in rcuwait_wait_event().
         *
         *    WAIT                WAKE
         *    [S] tsk = current   [S] cond = true
         *        MB (A)              MB (B)
         *    [L] cond            [L] tsk
         */
        smp_mb(); /* (B) */

        task = rcu_dereference(w->task);
        if (task)
                ret = wake_up_process(task);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
                                        struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

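/*
 * Synchronize an exiting thread with a coredump in progress in its
 * thread group: mark ourselves PF_POSTCOREDUMP so the dumper stops
 * counting us, decrement core_state->nr_threads, and, if we were
 * killed by the dump signal (PF_SIGNALED), sleep until
 * coredump_finish() clears self.task.
 */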
static void coredump_task_exit(struct task_struct *tsk)
{
        struct core_state *core_state;

        /*
         * Serialize with any possible pending coredump.
         * We must hold siglock around checking core_state
         * and setting PF_POSTCOREDUMP.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group without PF_POSTCOREDUMP set.
         */
        spin_lock_irq(&tsk->sighand->siglock);
        tsk->flags |= PF_POSTCOREDUMP;
        core_state = tsk->signal->core_state;
        spin_unlock_irq(&tsk->sighand->siglock);
        if (core_state) {
                struct core_thread self;

                self.task = current;
                if (self.task->flags & PF_SIGNALED)
                        self.next = xchg(&core_state->dumper.next, &self);
                else
                        self.task = NULL;
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
                __set_current_state(TASK_RUNNING);
        }
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                WRITE_ONCE(mm->owner, NULL);
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else, we should not get here often.
         */
        for_each_process(g) {
                if (g->flags & PF_KTHREAD)
                        continue;
                for_each_thread(g, c) {
                        if (c->mm == mm)
                                goto assign_new_owner;
                        if (c->mm)
                                break;
                }
        }
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        WRITE_ONCE(mm->owner, NULL);
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        WRITE_ONCE(mm->owner, c);
        lru_gen_migrate_mm(mm);
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we aren't already: drop
 * current->mm while keeping the address space alive as active_mm.
 */
static void exit_mm(void)
{
        struct mm_struct *mm = current->mm;

        exit_mm_release(current, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
        mmap_read_lock(mm);
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
        /*
         * When a thread stops operating on an address space, the loop
         * in membarrier_private_expedited() may not observe that
         * tsk->mm, and the loop in membarrier_global_expedited() may
         * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
         * rq->membarrier_state, so those would not issue an IPI.
         * Membarrier requires a memory barrier after accessing
         * user-space memory, before clearing tsk->mm or the
         * rq->membarrier_state.
         */
        smp_mb__after_spinlock();
        local_irq_disable();
        current->mm = NULL;
        membarrier_update_current_mm(NULL);
        enter_lazy_tlb(mm, current);
        local_irq_enable();
        task_unlock(current);
        mmap_read_unlock(mm);
        mm_update_next_owner(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
                exit_oom_victim();
}

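/* Return the first thread in @p's group that is not exiting, if any. */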
static struct task_struct *find_alive_thread(struct task_struct *p)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                if (!(t->flags & PF_EXITING))
                        return t;
        }
        return NULL;
}

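/*
 * Pick the reaper for @father's orphaned children.  Normally this is
 * the pid namespace's child_reaper; if the child_reaper itself is the
 * one exiting, the role moves to a live thread in its group.  With no
 * live thread left, the tasks already on @dead are released with
 * tasklist_lock dropped, the namespace is shut down via
 * zap_pid_ns_processes(), and the lock is retaken.
 */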
static struct task_struct *find_child_reaper(struct task_struct *father,
                                                struct list_head *dead)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *reaper = pid_ns->child_reaper;
        struct task_struct *p, *n;

        if (likely(reaper != father))
                return reaper;

        reaper = find_alive_thread(father);
        if (reaper) {
                pid_ns->child_reaper = reaper;
                return reaper;
        }

        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);

        return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
                                           struct task_struct *child_reaper)
{
        struct task_struct *thread, *reaper;

        thread = find_alive_thread(father);
        if (thread)
                return thread;

        if (father->signal->has_child_subreaper) {
                unsigned int ns_level = task_pid(father)->level;
                /*
                 * Find the first ->is_child_subreaper ancestor in our pid_ns.
                 * We can't check reaper != child_reaper to ensure we do not
                 * cross the namespaces, the exiting parent could be injected
                 * by setns() + fork().
                 * We check pid->level, this is slightly more efficient than
                 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
                 */
                for (reaper = father->real_parent;
                     task_pid(reaper)->level == ns_level;
                     reaper = reaper->real_parent) {
                        if (reaper == &init_task)
                                break;
                        if (!reaper->signal->is_child_subreaper)
                                continue;
                        thread = find_alive_thread(reaper);
                        if (thread)
                                return thread;
                }
        }

        return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                                struct list_head *dead)
{
        if (unlikely(p->exit_state == EXIT_DEAD))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_add(&p->ptrace_entry, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *      as a result of our exiting, and if they have any stopped
 *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
                                        struct list_head *dead)
{
        struct task_struct *p, *t, *reaper;

        if (unlikely(!list_empty(&father->ptraced)))
                exit_ptrace(father, dead);

        /* Can drop and reacquire tasklist_lock */
        reaper = find_child_reaper(father, dead);
        if (list_empty(&father->children))
                return;

        reaper = find_new_reaper(father, reaper);
        list_for_each_entry(p, &father->children, sibling) {
                for_each_thread(p, t) {
                        RCU_INIT_POINTER(t->real_parent, reaper);
                        BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
                        if (likely(!t->ptrace))
                                t->parent = t->real_parent;
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t,
                                                    PIDTYPE_TGID);
                }
                /*
                 * If this is a threaded reparent there is no need to
                 * notify anyone that anything has happened.
                 */
                if (!same_thread_group(reaper, father))
                        reparent_leader(father, p, dead);
        }
        list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us.
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;
        struct task_struct *p, *n;
        LIST_HEAD(dead);

        write_lock_irq(&tasklist_lock);
        forget_original_parent(tsk, &dead);

        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        tsk->exit_state = EXIT_ZOMBIE;
        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                                thread_group_empty(tsk) &&
                                !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                        do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        if (autoreap) {
                tsk->exit_state = EXIT_DEAD;
                list_add(&tsk->ptrace_entry, &dead);
        }

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exec_task);
        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}

#ifdef CONFIG_DEBUG_STACK_USAGE
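/* Report a new record-low amount of free stack observed at task exit. */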
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
                        current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

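/*
 * The common exit path for every thread: tear down the task's state in
 * a careful order (coredump sync, signals, mm, IPC, files, fs,
 * namespaces, perf, cgroups, ...) and finish in do_task_dead(), which
 * never returns.
 */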
void __noreturn do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        WARN_ON(tsk->plug);

        kcov_task_exit(tsk);
        kmsan_task_exit(tsk);

        coredump_task_exit(tsk);
        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        io_uring_files_cancel();
        exit_signals(tsk);  /* sets PF_EXITING */

        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
        acct_update_integrals(tsk);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                /*
                 * If the last thread of global init has exited, panic
                 * immediately to get a usable coredump.
                 */
                if (unlikely(is_global_init(tsk)))
                        panic("Attempted to kill init! exitcode=0x%08x\n",
                                tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk);
#endif
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm();

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        if (group_dead)
                disassociate_ctty(1);
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        exit_thread(tsk);

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * Because of cgroup mode, this must be called before cgroup_exit().
         */
        perf_event_exit_task(tsk);

        sched_autogroup_exit_task(tsk);
        cgroup_exit(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        flush_ptrace_hw_breakpoint(tsk);

        exit_tasks_rcu_start();
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
        mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held();

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                free_pipe_info(tsk->splice_pipe);

        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);

        validate_creds_for_do_exit(tsk);
        exit_task_stack_account(tsk);

        check_stack_usage();
        preempt_disable();
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();
        exit_tasks_rcu_finish();

        lockdep_free_task(tsk);
        do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
        /*
         * Take the task off the cpu after something catastrophic has
         * happened.
         *
         * We can get here from a kernel oops, sometimes with preemption off.
         * Start by checking for critical errors.
         * Then fix up important state like USER_DS and preemption.
         * Then do everything else.
         */
        struct task_struct *tsk = current;

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
                        preempt_count());
                preempt_count_set(PREEMPT_ENABLED);
        }

        /*
         * We're taking recursive faults here in make_task_dead. Safest is
         * to just leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                pr_alert("Fixing recursive fault but reboot is needed!\n");
                futex_exit_recursive(tsk);
                tsk->exit_state = EXIT_DEAD;
                refcount_inc(&tsk->rcu_users);
                do_task_dead();
        }

        do_exit(signr);
}

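/*
 * sys_exit() keeps only the low byte of the user-supplied status and
 * shifts it into bits 8-15 of exit_code, the layout userspace decodes
 * with WEXITSTATUS().
 */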
SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        if (sig->flags & SIGNAL_GROUP_EXIT)
                exit_code = sig->group_exit_code;
        else if (sig->group_exec_task)
                exit_code = 0;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;

                spin_lock_irq(&sighand->siglock);
                if (sig->flags & SIGNAL_GROUP_EXIT)
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else if (sig->group_exec_task)
                        exit_code = 0;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}

struct waitid_info {
        pid_t pid;
        uid_t uid;
        int status;
        int cause;
};

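/*
 * Per-call state for the wait*() family: what to wait for (wo_type,
 * wo_pid), the caller's flags, and where to deposit the results.
 */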
struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct waitid_info      *wo_info;
        int                     wo_stat;
        struct rusage           *wo_rusage;

        wait_queue_entry_t      child_wait;
        int                     notask_error;
};

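/*
 * PIDTYPE_MAX is used for P_ALL waits: match any task.  Otherwise the
 * task must carry wo_pid for the requested pid type.
 */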
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return  wo->wo_type == PIDTYPE_MAX ||
                task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;

        /*
         * Wait for all children (clone and not) if __WALL is set or
         * if it is traced by us.
         */
        if (ptrace || (wo->wo_flags & __WALL))
                return 1;

        /*
         * Otherwise, wait for clone children *only* if __WCLONE is set;
         * otherwise, wait for non-clone children *only*.
         *
         * Note: a "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD, or a non-leader thread which
         * we can only see if it is traced by us.
         */
        if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
                return 0;

        return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        int state, status;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct waitid_info *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                        ? p->signal->group_exit_code : p->exit_code;
                get_task_struct(p);
                read_unlock(&tasklist_lock);
                sched_annotate_sleep();
                if (wo->wo_rusage)
                        getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
                put_task_struct(p);
                goto out_info;
        }
        /*
         * Move the task's state to DEAD/TRACE, only one thread can do this.
         */
        state = (ptrace_reparented(p) && thread_group_leader(p)) ?
                EXIT_TRACE : EXIT_DEAD;
        if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
                return 0;
        /*
         * We own this thread, nobody else can reap it.
         */
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        /*
         * Check thread_group_leader() to exclude the traced sub-threads.
         */
        if (state == EXIT_DEAD && thread_group_leader(p)) {
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
                u64 tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields because the whole thread group is dead
                 * and nobody can change them.
                 *
                 * psig->stats_lock also protects us from our sub-threads
                 * which can reap other children at the same time. Until
                 * we change k_getrusage()-like users to rely on this lock
                 * we have to take ->siglock as well.
                 *
                 * We use thread_group_cputime_adjusted() to get times for
                 * the thread group, which consolidates times for all threads
                 * in the group including the group leader.
                 */
                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                spin_lock_irq(&current->sighand->siglock);
                write_seqlock(&psig->stats_lock);
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                write_sequnlock(&psig->stats_lock);
                spin_unlock_irq(&current->sighand->siglock);
        }

        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        wo->wo_stat = status;

        if (state == EXIT_TRACE) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);

                /* If parent wants a zombie, don't release it now */
                state = EXIT_ZOMBIE;
                if (do_notify_parent(p, p->exit_signal))
                        state = EXIT_DEAD;
                p->exit_state = state;
                write_unlock_irq(&tasklist_lock);
        }
        if (state == EXIT_DEAD)
                release_task(p);

out_info:
        infop = wo->wo_info;
        if (infop) {
                if ((status & 0x7f) == 0) {
                        infop->cause = CLD_EXITED;
                        infop->status = status >> 8;
                } else {
                        infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        infop->status = status & 0x7f;
                }
                infop->pid = pid;
                infop->uid = uid;
        }

        return pid;
}

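/*
 * Return where the stop code for @p lives: the per-task exit_code for
 * a ptrace stop, the group_exit_code for a job-control stop, or NULL
 * if @p is not stopped in the sense the caller asked about.
 */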
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
{
        struct waitid_info *infop;
        int exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();
        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        put_task_struct(p);

        if (likely(!(wo->wo_flags & WNOWAIT)))
                wo->wo_stat = (exit_code << 8) | 0x7f;

        infop = wo->wo_info;
        if (infop) {
                infop->cause = why;
                infop->status = exit_code;
                infop->pid = pid;
                infop->uid = uid;
        }
        return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        struct waitid_info *infop;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = from_kuid_munged(current_user_ns(), task_uid(p));
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();
        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        put_task_struct(p);

        infop = wo->wo_info;
        if (!infop) {
                wo->wo_stat = 0xffff;
        } else {
                infop->cause = CLD_CONTINUED;
                infop->pid = pid;
                infop->uid = uid;
                infop->status = SIGCONT;
        }
        return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
{
        /*
         * We can race with wait_task_zombie() from another thread.
         * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
         * can't confuse the checks below.
         */
        int exit_state = READ_ONCE(p->exit_state);
        int ret;

        if (unlikely(exit_state == EXIT_DEAD))
                return 0;

        ret = eligible_child(wo, ptrace, p);
        if (!ret)
                return ret;

        if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
                 */
                if (likely(!ptrace))
                        wo->notask_error = 0;
                return 0;
        }

        if (likely(!ptrace) && unlikely(p->ptrace)) {
                /*
                 * If it is traced by its real parent's group, just pretend
                 * the caller is ptrace_do_wait() and reap this child if it
                 * is zombie.
                 *
                 * This also hides group stop state from real parent; otherwise
                 * a single stop can be reported twice as group and ptrace stop.
                 * If a ptracer wants to distinguish these two events for its
                 * own children it should create a separate process which takes
                 * the role of real parent.
                 */
                if (!ptrace_reparented(p))
                        ptrace = 1;
        }

        /* slay zombie? */
        if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
                         * A zombie ptracee is only visible to its ptracer.
                         * Notification and reaping will be cascaded to the
                         * real parent when the ptracer detaches.
                         */
                        if (unlikely(ptrace) || likely(!p->ptrace))
                                return wait_task_zombie(wo, p);
                }

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
                 * so if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a finite
                 * amount of time once all the subthreads are released and
                 * will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);

                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);

                if (ret)
                        return ret;
        }

        return 0;
}

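/*
 * Wake-function installed on signal->wait_chldexit: only wake the
 * waiter if the child @key reporting an event is one this waiter could
 * actually be waiting for.
 */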
static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
                                int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                                child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                           TASK_INTERRUPTIBLE, p);
}

static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
                                 struct task_struct *target)
{
        struct task_struct *parent =
                !ptrace ? target->real_parent : target->parent;

        return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
                                     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
 * and tracee lists to find the target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
        bool ptrace;
        struct task_struct *target;
        int retval;

        ptrace = false;
        target = pid_task(wo->wo_pid, PIDTYPE_TGID);
        if (target && is_effectively_child(wo, ptrace, target)) {
                retval = wait_consider_task(wo, ptrace, target);
                if (retval)
                        return retval;
        }

        ptrace = true;
        target = pid_task(wo->wo_pid, PIDTYPE_PID);
        if (target && target->ptrace &&
            is_effectively_child(wo, ptrace, target)) {
                retval = wait_consider_task(wo, ptrace, target);
                if (retval)
                        return retval;
        }

        return 0;
}

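/*
 * Core of the wait*() family: register on signal->wait_chldexit, scan
 * children (and tracees) under tasklist_lock, and sleep in
 * TASK_INTERRUPTIBLE until something reportable happens, a signal
 * arrives, or WNOHANG says to return immediately.
 */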
1489 static long do_wait(struct wait_opts *wo)
1490 {
1491         int retval;
1492
1493         trace_sched_process_wait(wo->wo_pid);
1494
1495         init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1496         wo->child_wait.private = current;
1497         add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1498 repeat:
1499         /*
1500          * If there is nothing that can match our criteria, just get out.
1501          * We will clear ->notask_error to zero if we see any child that
1502          * might later match our criteria, even if we are not able to reap
1503          * it yet.
1504          */
1505         wo->notask_error = -ECHILD;
1506         if ((wo->wo_type < PIDTYPE_MAX) &&
1507            (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
1508                 goto notask;
1509
1510         set_current_state(TASK_INTERRUPTIBLE);
1511         read_lock(&tasklist_lock);
1512
1513         if (wo->wo_type == PIDTYPE_PID) {
1514                 retval = do_wait_pid(wo);
1515                 if (retval)
1516                         goto end;
1517         } else {
1518                 struct task_struct *tsk = current;
1519
1520                 do {
1521                         retval = do_wait_thread(wo, tsk);
1522                         if (retval)
1523                                 goto end;
1524
1525                         retval = ptrace_do_wait(wo, tsk);
1526                         if (retval)
1527                                 goto end;
1528
1529                         if (wo->wo_flags & __WNOTHREAD)
1530                                 break;
1531                 } while_each_thread(current, tsk);
1532         }
1533         read_unlock(&tasklist_lock);
1534
1535 notask:
1536         retval = wo->notask_error;
1537         if (!retval && !(wo->wo_flags & WNOHANG)) {
1538                 retval = -ERESTARTSYS;
1539                 if (!signal_pending(current)) {
1540                         schedule();
1541                         goto repeat;
1542                 }
1543         }
1544 end:
1545         __set_current_state(TASK_RUNNING);
1546         remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1547         return retval;
1548 }
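/*
 * do_wait() above is an instance of the classic open-coded wait loop.
 * A minimal sketch of the pattern (illustration only):
 *
 *      add_wait_queue(&wq, &entry);
 * repeat:
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      if (!condition && !signal_pending(current)) {
 *              schedule();
 *              goto repeat;
 *      }
 *      __set_current_state(TASK_RUNNING);
 *      remove_wait_queue(&wq, &entry);
 *
 * The task state is set before the condition is tested so that a wakeup
 * arriving between the test and schedule() cannot be lost.
 */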
1549
1550 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1551                           int options, struct rusage *ru)
1552 {
1553         struct wait_opts wo;
1554         struct pid *pid = NULL;
1555         enum pid_type type;
1556         long ret;
1557         unsigned int f_flags = 0;
1558
1559         if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1560                         __WNOTHREAD|__WCLONE|__WALL))
1561                 return -EINVAL;
1562         if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1563                 return -EINVAL;
1564
1565         switch (which) {
1566         case P_ALL:
1567                 type = PIDTYPE_MAX;
1568                 break;
1569         case P_PID:
1570                 type = PIDTYPE_PID;
1571                 if (upid <= 0)
1572                         return -EINVAL;
1573
1574                 pid = find_get_pid(upid);
1575                 break;
1576         case P_PGID:
1577                 type = PIDTYPE_PGID;
1578                 if (upid < 0)
1579                         return -EINVAL;
1580
1581                 if (upid)
1582                         pid = find_get_pid(upid);
1583                 else
1584                         pid = get_task_pid(current, PIDTYPE_PGID);
1585                 break;
1586         case P_PIDFD:
1587                 type = PIDTYPE_PID;
1588                 if (upid < 0)
1589                         return -EINVAL;
1590
1591                 pid = pidfd_get_pid(upid, &f_flags);
1592                 if (IS_ERR(pid))
1593                         return PTR_ERR(pid);
1594
1595                 break;
1596         default:
1597                 return -EINVAL;
1598         }
1599
1600         wo.wo_type      = type;
1601         wo.wo_pid       = pid;
1602         wo.wo_flags     = options;
1603         wo.wo_info      = infop;
1604         wo.wo_rusage    = ru;
1605         if (f_flags & O_NONBLOCK)
1606                 wo.wo_flags |= WNOHANG;
1607
1608         ret = do_wait(&wo);
1609         if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
1610                 ret = -EAGAIN;
1611
1612         put_pid(pid);
1613         return ret;
1614 }
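/*
 * Userspace view of the P_PIDFD/O_NONBLOCK handling above (a sketch;
 * SYS_pidfd_open and PIDFD_NONBLOCK require a reasonably recent kernel):
 * a pidfd opened non-blocking makes waitid() behave as if WNOHANG were
 * passed, with -EAGAIN returned instead of 0 when the target has not yet
 * changed state:
 *
 *      int pidfd = syscall(SYS_pidfd_open, pid, PIDFD_NONBLOCK);
 *      siginfo_t si;
 *
 *      if (waitid(P_PIDFD, pidfd, &si, WEXITED) < 0 && errno == EAGAIN)
 *              ...     (child still running, poll the pidfd and retry)
 */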
1615
1616 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1617                 infop, int, options, struct rusage __user *, ru)
1618 {
1619         struct rusage r;
1620         struct waitid_info info = {.status = 0};
1621         long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1622         int signo = 0;
1623
1624         if (err > 0) {
1625                 signo = SIGCHLD;
1626                 err = 0;
1627                 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1628                         return -EFAULT;
1629         }
1630         if (!infop)
1631                 return err;
1632
1633         if (!user_write_access_begin(infop, sizeof(*infop)))
1634                 return -EFAULT;
1635
1636         unsafe_put_user(signo, &infop->si_signo, Efault);
1637         unsafe_put_user(0, &infop->si_errno, Efault);
1638         unsafe_put_user(info.cause, &infop->si_code, Efault);
1639         unsafe_put_user(info.pid, &infop->si_pid, Efault);
1640         unsafe_put_user(info.uid, &infop->si_uid, Efault);
1641         unsafe_put_user(info.status, &infop->si_status, Efault);
1642         user_write_access_end();
1643         return err;
1644 Efault:
1645         user_write_access_end();
1646         return -EFAULT;
1647 }
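/*
 * What the unsafe_put_user() sequence above looks like from the calling
 * side (illustration only):
 *
 *      siginfo_t si;
 *
 *      if (waitid(P_PID, child, &si, WEXITED) == 0 &&
 *          si.si_code == CLD_EXITED)
 *              printf("pid %d exited with status %d\n",
 *                     si.si_pid, si.si_status);
 *
 * When a child is reaped, si_signo reads SIGCHLD; with WNOHANG and no
 * state change it reads 0. si_status carries the exit code or signal
 * number, depending on si_code (CLD_EXITED, CLD_KILLED, CLD_DUMPED, ...).
 */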
1648
1649 long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1650                   struct rusage *ru)
1651 {
1652         struct wait_opts wo;
1653         struct pid *pid = NULL;
1654         enum pid_type type;
1655         long ret;
1656
1657         if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1658                         __WNOTHREAD|__WCLONE|__WALL))
1659                 return -EINVAL;
1660
1661         /* -INT_MIN is not defined */
1662         if (upid == INT_MIN)
1663                 return -ESRCH;
1664
1665         if (upid == -1)
1666                 type = PIDTYPE_MAX;
1667         else if (upid < 0) {
1668                 type = PIDTYPE_PGID;
1669                 pid = find_get_pid(-upid);
1670         } else if (upid == 0) {
1671                 type = PIDTYPE_PGID;
1672                 pid = get_task_pid(current, PIDTYPE_PGID);
1673         } else /* upid > 0 */ {
1674                 type = PIDTYPE_PID;
1675                 pid = find_get_pid(upid);
1676         }
1677
1678         wo.wo_type      = type;
1679         wo.wo_pid       = pid;
1680         wo.wo_flags     = options | WEXITED;
1681         wo.wo_info      = NULL;
1682         wo.wo_stat      = 0;
1683         wo.wo_rusage    = ru;
1684         ret = do_wait(&wo);
1685         put_pid(pid);
1686         if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1687                 ret = -EFAULT;
1688
1689         return ret;
1690 }
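/*
 * The upid decoding above implements the historical wait4(2)/waitpid(2)
 * encoding; from the calling side (illustration only):
 *
 *      waitpid(-1, &status, 0);        any child
 *      waitpid(0, &status, 0);         any child in the caller's process group
 *      waitpid(-pgid, &status, 0);     any child in process group pgid
 *      waitpid(child, &status, 0);     that one specific child
 *
 * The INT_MIN check exists because -INT_MIN overflows, so that process
 * group could never be represented.
 */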
1691
1692 int kernel_wait(pid_t pid, int *stat)
1693 {
1694         struct wait_opts wo = {
1695                 .wo_type        = PIDTYPE_PID,
1696                 .wo_pid         = find_get_pid(pid),
1697                 .wo_flags       = WEXITED,
1698         };
1699         int ret;
1700
1701         ret = do_wait(&wo);
1702         if (ret > 0 && wo.wo_stat)
1703                 *stat = wo.wo_stat;
1704         put_pid(wo.wo_pid);
1705         return ret;
1706 }
1707
1708 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1709                 int, options, struct rusage __user *, ru)
1710 {
1711         struct rusage r;
1712         long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1713
1714         if (err > 0) {
1715                 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1716                         return -EFAULT;
1717         }
1718         return err;
1719 }
1720
1721 #ifdef __ARCH_WANT_SYS_WAITPID
1722
1723 /*
1724  * sys_waitpid() remains for compatibility. waitpid() should be
1725  * implemented by calling sys_wait4() from libc.a.
1726  */
1727 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1728 {
1729         return kernel_wait4(pid, stat_addr, options, NULL);
1730 }
1731
1732 #endif
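/*
 * A sketch of the libc wrapper the comment above refers to; modern C
 * libraries implement waitpid() in terms of wait4() roughly like this:
 *
 *      pid_t waitpid(pid_t pid, int *status, int options)
 *      {
 *              return wait4(pid, status, options, NULL);
 *      }
 */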
1733
1734 #ifdef CONFIG_COMPAT
1735 COMPAT_SYSCALL_DEFINE4(wait4,
1736         compat_pid_t, pid,
1737         compat_uint_t __user *, stat_addr,
1738         int, options,
1739         struct compat_rusage __user *, ru)
1740 {
1741         struct rusage r;
1742         long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1743         if (err > 0) {
1744                 if (ru && put_compat_rusage(&r, ru))
1745                         return -EFAULT;
1746         }
1747         return err;
1748 }
1749
1750 COMPAT_SYSCALL_DEFINE5(waitid,
1751                 int, which, compat_pid_t, pid,
1752                 struct compat_siginfo __user *, infop, int, options,
1753                 struct compat_rusage __user *, uru)
1754 {
1755         struct rusage ru;
1756         struct waitid_info info = {.status = 0};
1757         long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
1758         int signo = 0;
1759         if (err > 0) {
1760                 signo = SIGCHLD;
1761                 err = 0;
1762                 if (uru) {
1763                         /* kernel_waitid() overwrites everything in ru */
1764                         if (COMPAT_USE_64BIT_TIME)
1765                                 err = copy_to_user(uru, &ru, sizeof(ru));
1766                         else
1767                                 err = put_compat_rusage(&ru, uru);
1768                         if (err)
1769                                 return -EFAULT;
1770                 }
1771         }
1772
1773         if (!infop)
1774                 return err;
1775
1776         if (!user_write_access_begin(infop, sizeof(*infop)))
1777                 return -EFAULT;
1778
1779         unsafe_put_user(signo, &infop->si_signo, Efault);
1780         unsafe_put_user(0, &infop->si_errno, Efault);
1781         unsafe_put_user(info.cause, &infop->si_code, Efault);
1782         unsafe_put_user(info.pid, &infop->si_pid, Efault);
1783         unsafe_put_user(info.uid, &infop->si_uid, Efault);
1784         unsafe_put_user(info.status, &infop->si_status, Efault);
1785         user_write_access_end();
1786         return err;
1787 Efault:
1788         user_write_access_end();
1789         return -EFAULT;
1790 }
1791 #endif
1792
1793 /**
1794  * thread_group_exited - check that a thread group has exited
1795  * @pid: tgid of thread group to be checked.
1796  *
1797  * Test if the thread group represented by tgid has exited (all
1798  * threads are zombies, dead or completely gone).
1799  *
1800  * Return: true if the thread group has exited. false otherwise.
1801  */
1802 bool thread_group_exited(struct pid *pid)
1803 {
1804         struct task_struct *task;
1805         bool exited;
1806
1807         rcu_read_lock();
1808         task = pid_task(pid, PIDTYPE_PID);
1809         exited = !task ||
1810                 (READ_ONCE(task->exit_state) && thread_group_empty(task));
1811         rcu_read_unlock();
1812
1813         return exited;
1814 }
1815 EXPORT_SYMBOL(thread_group_exited);
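/*
 * Illustrative caller (a sketch, not from this file): pidfd polling can
 * use this predicate to decide whether a pidfd reports readable:
 *
 *      if (thread_group_exited(pid))
 *              poll_flags = EPOLLIN | EPOLLRDNORM;
 */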
1816
1817 __weak void abort(void)
1818 {
1819         BUG();
1820
1821         /* if that doesn't kill us, halt */
1822         panic("Oops failed to kill thread");
1823 }
1824 EXPORT_SYMBOL(abort);