2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/user.h>
18 #include <linux/sched/debug.h>
19 #include <linux/sched/task.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sched/cputime.h>
22 #include <linux/file.h>
24 #include <linux/proc_fs.h>
25 #include <linux/tty.h>
26 #include <linux/binfmts.h>
27 #include <linux/coredump.h>
28 #include <linux/security.h>
29 #include <linux/syscalls.h>
30 #include <linux/ptrace.h>
31 #include <linux/signal.h>
32 #include <linux/signalfd.h>
33 #include <linux/ratelimit.h>
34 #include <linux/tracehook.h>
35 #include <linux/capability.h>
36 #include <linux/freezer.h>
37 #include <linux/pid_namespace.h>
38 #include <linux/nsproxy.h>
39 #include <linux/user_namespace.h>
40 #include <linux/uprobes.h>
41 #include <linux/compat.h>
42 #include <linux/cn_proc.h>
43 #include <linux/compiler.h>
44 #include <linux/posix-timers.h>
45 #include <linux/livepatch.h>
46 #include <linux/cgroup.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/signal.h>
51 #include <asm/param.h>
52 #include <linux/uaccess.h>
53 #include <asm/unistd.h>
54 #include <asm/siginfo.h>
55 #include <asm/cacheflush.h>
56 #include "audit.h" /* audit_signal_info() */
59 * SLAB caches for signal bits.
62 static struct kmem_cache *sigqueue_cachep;
64 int print_fatal_signals __read_mostly;
66 static void __user *sig_handler(struct task_struct *t, int sig)
68 return t->sighand->action[sig - 1].sa.sa_handler;
71 static inline bool sig_handler_ignored(void __user *handler, int sig)
73 /* Is it explicitly or implicitly ignored? */
74 return handler == SIG_IGN ||
75 (handler == SIG_DFL && sig_kernel_ignore(sig));
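/*
 * For example, a signal such as SIGCHLD or SIGWINCH left at SIG_DFL is
 * implicitly ignored here, because sig_kernel_ignore() covers the
 * default-ignore signals.
 */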
78 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
82 handler = sig_handler(t, sig);
84 /* SIGKILL and SIGSTOP may not be sent to the global init */
85 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
88 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
89 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
92 return sig_handler_ignored(handler, sig);
95 static bool sig_ignored(struct task_struct *t, int sig, bool force)
98 * Blocked signals are never ignored, since the
99 * signal handler may change by the time it is unblocked.
102 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
106 * Tracers may want to know about even ignored signals, unless the
107 * signal is SIGKILL, which can't be reported anyway but can be ignored
108 * by a SIGNAL_UNKILLABLE task.
110 if (t->ptrace && sig != SIGKILL)
113 return sig_task_ignored(t, sig, force);
117 * Re-calculate pending state from the set of locally pending
118 * signals, globally pending signals, and blocked signals.
120 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
125 switch (_NSIG_WORDS) {
127 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
128 ready |= signal->sig[i] &~ blocked->sig[i];
131 case 4: ready = signal->sig[3] &~ blocked->sig[3];
132 ready |= signal->sig[2] &~ blocked->sig[2];
133 ready |= signal->sig[1] &~ blocked->sig[1];
134 ready |= signal->sig[0] &~ blocked->sig[0];
137 case 2: ready = signal->sig[1] &~ blocked->sig[1];
138 ready |= signal->sig[0] &~ blocked->sig[0];
141 case 1: ready = signal->sig[0] &~ blocked->sig[0];
146 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
148 static bool recalc_sigpending_tsk(struct task_struct *t)
150 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
151 PENDING(&t->pending, &t->blocked) ||
152 PENDING(&t->signal->shared_pending, &t->blocked) ||
153 cgroup_task_frozen(t)) {
154 set_tsk_thread_flag(t, TIF_SIGPENDING);
159 * We must never clear the flag in another thread, or in current
160 * when it's possible the current syscall is returning -ERESTART*.
161 * So we don't clear it here; it is cleared only by callers that know they should.
167 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
168 * This is superfluous when called on current; the wakeup is a harmless no-op.
170 void recalc_sigpending_and_wake(struct task_struct *t)
172 if (recalc_sigpending_tsk(t))
173 signal_wake_up(t, 0);
176 void recalc_sigpending(void)
178 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
179 !klp_patch_pending(current))
180 clear_thread_flag(TIF_SIGPENDING);
183 EXPORT_SYMBOL(recalc_sigpending);
185 void calculate_sigpending(void)
187 /* Have any signals or users of TIF_SIGPENDING been delayed until after fork? */
190 spin_lock_irq(&current->sighand->siglock);
191 set_tsk_thread_flag(current, TIF_SIGPENDING);
193 spin_unlock_irq(&current->sighand->siglock);
196 /* Given the mask, find the first available signal that should be serviced. */
198 #define SYNCHRONOUS_MASK \
199 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
200 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
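/*
 * As a concrete example of the ordering below: if SIGUSR1 and SIGSEGV are
 * both pending and unblocked, next_signal() reports SIGSEGV first even
 * though SIGUSR1 has the lower signal number, because the first word is
 * narrowed to SYNCHRONOUS_MASK whenever a synchronous signal is pending.
 */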
202 int next_signal(struct sigpending *pending, sigset_t *mask)
204 unsigned long i, *s, *m, x;
207 s = pending->signal.sig;
211 * Handle the first word specially: it contains the
212 * synchronous signals that need to be dequeued first.
216 if (x & SYNCHRONOUS_MASK)
217 x &= SYNCHRONOUS_MASK;
222 switch (_NSIG_WORDS) {
224 for (i = 1; i < _NSIG_WORDS; ++i) {
228 sig = ffz(~x) + i*_NSIG_BPW + 1;
237 sig = ffz(~x) + _NSIG_BPW + 1;
248 static inline void print_dropped_signal(int sig)
250 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252 if (!print_fatal_signals)
255 if (!__ratelimit(&ratelimit_state))
258 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
259 current->comm, current->pid, sig);
263 * task_set_jobctl_pending - set jobctl pending bits
265 * @mask: pending bits to set
267 * Set @mask in @task->jobctl. @mask must be a subset of
268 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
269 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
270 * cleared. If @task is already being killed or exiting, this function becomes a no-op.
274 * Must be called with @task->sighand->siglock held.
277 * %true if @mask is set, %false if made noop because @task was dying.
279 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
282 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
283 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
288 if (mask & JOBCTL_STOP_SIGMASK)
289 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291 task->jobctl |= mask;
296 * task_clear_jobctl_trapping - clear jobctl trapping bit
299 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
300 * Clear it and wake up the ptracer. Note that we don't need any further
301 * locking. @task->siglock guarantees that @task->parent points to the ptracer.
305 * Must be called with @task->sighand->siglock held.
307 void task_clear_jobctl_trapping(struct task_struct *task)
309 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
310 task->jobctl &= ~JOBCTL_TRAPPING;
311 smp_mb(); /* advised by wake_up_bit() */
312 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
317 * task_clear_jobctl_pending - clear jobctl pending bits
319 * @mask: pending bits to clear
321 * Clear @mask from @task->jobctl. @mask must be subset of
322 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
323 * STOP bits are cleared together.
325 * If clearing of @mask leaves no stop or trap pending, this function calls
326 * task_clear_jobctl_trapping().
329 * Must be called with @task->sighand->siglock held.
331 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335 if (mask & JOBCTL_STOP_PENDING)
336 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338 task->jobctl &= ~mask;
340 if (!(task->jobctl & JOBCTL_PENDING_MASK))
341 task_clear_jobctl_trapping(task);
345 * task_participate_group_stop - participate in a group stop
346 * @task: task participating in a group stop
348 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
349 * Group stop states are cleared and the group stop count is consumed if
350 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
351 * stop, the appropriate %SIGNAL_* flags are set.
354 * Must be called with @task->sighand->siglock held.
357 * %true if group stop completion should be notified to the parent, %false otherwise.
360 static bool task_participate_group_stop(struct task_struct *task)
362 struct signal_struct *sig = task->signal;
363 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
372 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
373 sig->group_stop_count--;
376 * Tell the caller to notify completion iff we are entering into a
377 * fresh group stop. Read comment in do_signal_stop() for details.
379 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
380 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386 void task_join_group_stop(struct task_struct *task)
388 /* Have the new thread join an on-going signal group stop */
389 unsigned long jobctl = current->jobctl;
390 if (jobctl & JOBCTL_STOP_PENDING) {
391 struct signal_struct *sig = current->signal;
392 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
393 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
394 if (task_set_jobctl_pending(task, signr | gstop)) {
395 sig->group_stop_count++;
401 * allocate a new signal queue record
402 * - this may be called without locks if and only if t == current, otherwise an
403 * appropriate lock must be held to stop the target task from exiting
405 static struct sigqueue *
406 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
408 struct sigqueue *q = NULL;
409 struct user_struct *user;
412 * Protect access to @t credentials. This can go away when all
413 * callers hold rcu read lock.
416 user = get_uid(__task_cred(t)->user);
417 atomic_inc(&user->sigpending);
420 if (override_rlimit ||
421 atomic_read(&user->sigpending) <=
422 task_rlimit(t, RLIMIT_SIGPENDING)) {
423 q = kmem_cache_alloc(sigqueue_cachep, flags);
425 print_dropped_signal(sig);
428 if (unlikely(q == NULL)) {
429 atomic_dec(&user->sigpending);
432 INIT_LIST_HEAD(&q->list);
440 static void __sigqueue_free(struct sigqueue *q)
442 if (q->flags & SIGQUEUE_PREALLOC)
444 atomic_dec(&q->user->sigpending);
446 kmem_cache_free(sigqueue_cachep, q);
449 void flush_sigqueue(struct sigpending *queue)
453 sigemptyset(&queue->signal);
454 while (!list_empty(&queue->list)) {
455 q = list_entry(queue->list.next, struct sigqueue , list);
456 list_del_init(&q->list);
462 * Flush all pending signals for this kthread.
464 void flush_signals(struct task_struct *t)
468 spin_lock_irqsave(&t->sighand->siglock, flags);
469 clear_tsk_thread_flag(t, TIF_SIGPENDING);
470 flush_sigqueue(&t->pending);
471 flush_sigqueue(&t->signal->shared_pending);
472 spin_unlock_irqrestore(&t->sighand->siglock, flags);
474 EXPORT_SYMBOL(flush_signals);
476 #ifdef CONFIG_POSIX_TIMERS
477 static void __flush_itimer_signals(struct sigpending *pending)
479 sigset_t signal, retain;
480 struct sigqueue *q, *n;
482 signal = pending->signal;
483 sigemptyset(&retain);
485 list_for_each_entry_safe(q, n, &pending->list, list) {
486 int sig = q->info.si_signo;
488 if (likely(q->info.si_code != SI_TIMER)) {
489 sigaddset(&retain, sig);
491 sigdelset(&signal, sig);
492 list_del_init(&q->list);
497 sigorsets(&pending->signal, &signal, &retain);
500 void flush_itimer_signals(void)
502 struct task_struct *tsk = current;
505 spin_lock_irqsave(&tsk->sighand->siglock, flags);
506 __flush_itimer_signals(&tsk->pending);
507 __flush_itimer_signals(&tsk->signal->shared_pending);
508 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
512 void ignore_signals(struct task_struct *t)
516 for (i = 0; i < _NSIG; ++i)
517 t->sighand->action[i].sa.sa_handler = SIG_IGN;
523 * Flush all handlers for a task.
527 flush_signal_handlers(struct task_struct *t, int force_default)
530 struct k_sigaction *ka = &t->sighand->action[0];
531 for (i = _NSIG ; i != 0 ; i--) {
532 if (force_default || ka->sa.sa_handler != SIG_IGN)
533 ka->sa.sa_handler = SIG_DFL;
535 #ifdef __ARCH_HAS_SA_RESTORER
536 ka->sa.sa_restorer = NULL;
538 sigemptyset(&ka->sa.sa_mask);
543 bool unhandled_signal(struct task_struct *tsk, int sig)
545 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
546 if (is_global_init(tsk))
549 if (handler != SIG_IGN && handler != SIG_DFL)
552 /* if ptraced, let the tracer determine */
556 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
559 struct sigqueue *q, *first = NULL;
562 * Collect the siginfo appropriate to this signal. Check if
563 * there is another siginfo for the same signal.
565 list_for_each_entry(q, &list->list, list) {
566 if (q->info.si_signo == sig) {
573 sigdelset(&list->signal, sig);
577 list_del_init(&first->list);
578 copy_siginfo(info, &first->info);
581 (first->flags & SIGQUEUE_PREALLOC) &&
582 (info->si_code == SI_TIMER) &&
583 (info->si_sys_private);
585 __sigqueue_free(first);
588 * Ok, it wasn't in the queue. This must be
589 * a fast-pathed signal or we must have been
590 * out of queue space. So zero out the info.
593 info->si_signo = sig;
595 info->si_code = SI_USER;
601 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
602 kernel_siginfo_t *info, bool *resched_timer)
604 int sig = next_signal(pending, mask);
607 collect_signal(sig, pending, info, resched_timer);
612 * Dequeue a signal and return the element to the caller, which is
613 * expected to free it.
615 * All callers have to hold the siglock.
617 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
619 bool resched_timer = false;
622 /* We only dequeue private signals from ourselves; we don't let
623 * signalfd steal them
625 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
627 signr = __dequeue_signal(&tsk->signal->shared_pending,
628 mask, info, &resched_timer);
629 #ifdef CONFIG_POSIX_TIMERS
633 * itimers are process shared and we restart periodic
634 * itimers in the signal delivery path to prevent DoS
635 * attacks in the high resolution timer case. This is
636 * compliant with the old way of self-restarting
637 * itimers, as the SIGALRM is a legacy signal and only
638 * queued once. Changing the restart behaviour to
639 * restart the timer in the signal dequeue path is
640 * reducing the timer noise on heavily loaded !highres systems.
643 if (unlikely(signr == SIGALRM)) {
644 struct hrtimer *tmr = &tsk->signal->real_timer;
646 if (!hrtimer_is_queued(tmr) &&
647 tsk->signal->it_real_incr != 0) {
648 hrtimer_forward(tmr, tmr->base->get_time(),
649 tsk->signal->it_real_incr);
650 hrtimer_restart(tmr);
660 if (unlikely(sig_kernel_stop(signr))) {
662 * Set a marker that we have dequeued a stop signal. Our
663 * caller might release the siglock and then the pending
664 * stop signal it is about to process is no longer in the
665 * pending bitmasks, but must still be cleared by a SIGCONT
666 * (and overruled by a SIGKILL). So those cases clear this
667 * shared flag after we've set it. Note that this flag may
668 * remain set after the signal we return is ignored or
669 * handled. That doesn't matter because its only purpose
670 * is to alert stop-signal processing code when another
671 * processor has come along and cleared the flag.
673 current->jobctl |= JOBCTL_STOP_DEQUEUED;
675 #ifdef CONFIG_POSIX_TIMERS
678 * Release the siglock to ensure proper locking order
679 * of timer locks outside of siglocks. Note, we leave
680 * irqs disabled here, since the posix-timers code is
681 * about to disable them again anyway.
683 spin_unlock(&tsk->sighand->siglock);
684 posixtimer_rearm(info);
685 spin_lock(&tsk->sighand->siglock);
687 /* Don't expose the si_sys_private value to userspace */
688 info->si_sys_private = 0;
693 EXPORT_SYMBOL_GPL(dequeue_signal);
695 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
697 struct task_struct *tsk = current;
698 struct sigpending *pending = &tsk->pending;
699 struct sigqueue *q, *sync = NULL;
702 * Might a synchronous signal be in the queue?
704 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
708 * Return the first synchronous signal in the queue.
710 list_for_each_entry(q, &pending->list, list) {
711 /* Synchronous signals have a positive si_code */
712 if ((q->info.si_code > SI_USER) &&
713 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
721 * Check if there is another siginfo for the same signal.
723 list_for_each_entry_continue(q, &pending->list, list) {
724 if (q->info.si_signo == sync->info.si_signo)
728 sigdelset(&pending->signal, sync->info.si_signo);
731 list_del_init(&sync->list);
732 copy_siginfo(info, &sync->info);
733 __sigqueue_free(sync);
734 return info->si_signo;
738 * Tell a process that it has a new active signal.
740 * NOTE! we rely on the previous spin_lock to
741 * lock interrupts for us! We can only be called with
742 * "siglock" held, and the local interrupt must
743 * have been disabled when that got acquired!
745 * No need to set need_resched since signal event passing
746 * goes through ->blocked
748 void signal_wake_up_state(struct task_struct *t, unsigned int state)
750 set_tsk_thread_flag(t, TIF_SIGPENDING);
752 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
753 * case. We don't check t->state here because there is a race with it
754 * executing on another processor and just now entering stopped state.
755 * By using wake_up_state, we ensure the process will wake up and
756 * handle its death signal.
758 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
763 * Remove signals in mask from the pending set and queue.
766 * All callers must be holding the siglock.
768 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
770 struct sigqueue *q, *n;
773 sigandsets(&m, mask, &s->signal);
774 if (sigisemptyset(&m))
777 sigandnsets(&s->signal, &s->signal, mask);
778 list_for_each_entry_safe(q, n, &s->list, list) {
779 if (sigismember(mask, q->info.si_signo)) {
780 list_del_init(&q->list);
786 static inline int is_si_special(const struct kernel_siginfo *info)
788 return info <= SEND_SIG_PRIV;
791 static inline bool si_fromuser(const struct kernel_siginfo *info)
793 return info == SEND_SIG_NOINFO ||
794 (!is_si_special(info) && SI_FROMUSER(info));
798 * called with RCU read lock from check_kill_permission()
800 static bool kill_ok_by_cred(struct task_struct *t)
802 const struct cred *cred = current_cred();
803 const struct cred *tcred = __task_cred(t);
805 return uid_eq(cred->euid, tcred->suid) ||
806 uid_eq(cred->euid, tcred->uid) ||
807 uid_eq(cred->uid, tcred->suid) ||
808 uid_eq(cred->uid, tcred->uid) ||
809 ns_capable(tcred->user_ns, CAP_KILL);
813 * Bad permissions for sending the signal
814 * - the caller must hold the RCU read lock
816 static int check_kill_permission(int sig, struct kernel_siginfo *info,
817 struct task_struct *t)
822 if (!valid_signal(sig))
825 if (!si_fromuser(info))
828 error = audit_signal_info(sig, t); /* Let audit system see the signal */
832 if (!same_thread_group(current, t) &&
833 !kill_ok_by_cred(t)) {
836 sid = task_session(t);
838 * We don't return the error if sid == NULL. The
839 * task was unhashed; the caller must notice this.
841 if (!sid || sid == task_session(current))
849 return security_task_kill(t, info, sig, NULL);
853 * ptrace_trap_notify - schedule trap to notify ptracer
854 * @t: tracee wanting to notify tracer
856 * This function schedules sticky ptrace trap which is cleared on the next
857 * TRAP_STOP to notify ptracer of an event. @t must have been seized by %PTRACE_SEIZE.
860 * If @t is running, STOP trap will be taken. If trapped for STOP and
861 * ptracer is listening for events, tracee is woken up so that it can
862 * re-trap for the new event. If trapped otherwise, STOP trap will be
863 * eventually taken without returning to userland after the existing traps
864 * are finished by PTRACE_CONT.
867 * Must be called with @task->sighand->siglock held.
869 static void ptrace_trap_notify(struct task_struct *t)
871 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
872 assert_spin_locked(&t->sighand->siglock);
874 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
875 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
879 * Handle magic process-wide effects of stop/continue signals. Unlike
880 * the signal actions, these happen immediately at signal-generation
881 * time regardless of blocking, ignoring, or handling. This does the
882 * actual continuing for SIGCONT, but not the actual stopping for stop
883 * signals. The process stop is done as a signal action for SIG_DFL.
885 * Returns true if the signal should be actually delivered, otherwise
886 * it should be dropped.
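/*
 * For example, an incoming SIGCONT wakes up stopped threads and flushes any
 * queued stop signals below even when SIGCONT itself is ignored; in that
 * case the return value simply tells the caller not to queue it.
 */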
888 static bool prepare_signal(int sig, struct task_struct *p, bool force)
890 struct signal_struct *signal = p->signal;
891 struct task_struct *t;
894 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
895 if (!(signal->flags & SIGNAL_GROUP_EXIT))
896 return sig == SIGKILL;
898 * The process is in the middle of dying, nothing to do.
900 } else if (sig_kernel_stop(sig)) {
902 * This is a stop signal. Remove SIGCONT from all queues.
904 siginitset(&flush, sigmask(SIGCONT));
905 flush_sigqueue_mask(&flush, &signal->shared_pending);
906 for_each_thread(p, t)
907 flush_sigqueue_mask(&flush, &t->pending);
908 } else if (sig == SIGCONT) {
911 * Remove all stop signals from all queues, wake all threads.
913 siginitset(&flush, SIG_KERNEL_STOP_MASK);
914 flush_sigqueue_mask(&flush, &signal->shared_pending);
915 for_each_thread(p, t) {
916 flush_sigqueue_mask(&flush, &t->pending);
917 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
918 if (likely(!(t->ptrace & PT_SEIZED)))
919 wake_up_state(t, __TASK_STOPPED);
921 ptrace_trap_notify(t);
925 * Notify the parent with CLD_CONTINUED if we were stopped.
927 * If we were in the middle of a group stop, we pretend it
928 * was already finished, and then continued. Since SIGCHLD
929 * doesn't queue we report only CLD_STOPPED, as if the next
930 * CLD_CONTINUED was dropped.
933 if (signal->flags & SIGNAL_STOP_STOPPED)
934 why |= SIGNAL_CLD_CONTINUED;
935 else if (signal->group_stop_count)
936 why |= SIGNAL_CLD_STOPPED;
940 * The first thread which returns from do_signal_stop()
941 * will take ->siglock, notice SIGNAL_CLD_MASK, and
942 * notify its parent. See get_signal().
944 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
945 signal->group_stop_count = 0;
946 signal->group_exit_code = 0;
950 return !sig_ignored(p, sig, force);
954 * Test if P wants to take SIG. After we've checked all threads with this,
955 * it's equivalent to finding no threads not blocking SIG. Any threads not
956 * blocking SIG were ruled out because they are not running and already
957 * have pending signals. Such threads will dequeue from the shared queue
958 * as soon as they're available, so putting the signal on the shared queue
959 * will be equivalent to sending it to one such thread.
961 static inline bool wants_signal(int sig, struct task_struct *p)
963 if (sigismember(&p->blocked, sig))
966 if (p->flags & PF_EXITING)
972 if (task_is_stopped_or_traced(p))
975 return task_curr(p) || !signal_pending(p);
978 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
980 struct signal_struct *signal = p->signal;
981 struct task_struct *t;
984 * Now find a thread we can wake up to take the signal off the queue.
986 * If the main thread wants the signal, it gets first crack.
987 * Probably the least surprising to the average bear.
989 if (wants_signal(sig, p))
991 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
993 * There is just one thread and it does not need to be woken.
994 * It will dequeue unblocked signals before it runs again.
999 * Otherwise try to find a suitable thread.
1001 t = signal->curr_target;
1002 while (!wants_signal(sig, t)) {
1004 if (t == signal->curr_target)
1006 * No thread needs to be woken.
1007 * Any eligible threads will see
1008 * the signal in the queue soon.
1012 signal->curr_target = t;
1016 * Found a killable thread. If the signal will be fatal,
1017 * then start taking the whole group down immediately.
1019 if (sig_fatal(p, sig) &&
1020 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1021 !sigismember(&t->real_blocked, sig) &&
1022 (sig == SIGKILL || !p->ptrace)) {
1024 * This signal will be fatal to the whole group.
1026 if (!sig_kernel_coredump(sig)) {
1028 * Start a group exit and wake everybody up.
1029 * This way we don't have other threads
1030 * running and doing things after a slower
1031 * thread has the fatal signal pending.
1033 signal->flags = SIGNAL_GROUP_EXIT;
1034 signal->group_exit_code = sig;
1035 signal->group_stop_count = 0;
1038 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1039 sigaddset(&t->pending.signal, SIGKILL);
1040 signal_wake_up(t, 1);
1041 } while_each_thread(p, t);
1047 * The signal is already in the shared-pending queue.
1048 * Tell the chosen thread to wake up and dequeue it.
1050 signal_wake_up(t, sig == SIGKILL);
1054 static inline bool legacy_queue(struct sigpending *signals, int sig)
1056 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
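/*
 * In practice this means classic signals coalesce: two SIGUSR1s generated
 * before the target dequeues the first leave only one pending instance,
 * while real-time signals such as SIGRTMIN+1 are queued once per send
 * (subject to RLIMIT_SIGPENDING).
 */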
1059 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1060 enum pid_type type, bool force)
1062 struct sigpending *pending;
1064 int override_rlimit;
1065 int ret = 0, result;
1067 assert_spin_locked(&t->sighand->siglock);
1069 result = TRACE_SIGNAL_IGNORED;
1070 if (!prepare_signal(sig, t, force))
1073 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1075 * Short-circuit ignored signals and support queuing
1076 * exactly one non-rt signal, so that we can get more
1077 * detailed information about the cause of the signal.
1079 result = TRACE_SIGNAL_ALREADY_PENDING;
1080 if (legacy_queue(pending, sig))
1083 result = TRACE_SIGNAL_DELIVERED;
1085 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1087 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1091 * Real-time signals must be queued if sent by sigqueue, or
1092 * some other real-time mechanism. It is implementation
1093 * defined whether kill() does so. We attempt to do so, on
1094 * the principle of least surprise, but since kill is not
1095 * allowed to fail with EAGAIN when low on memory we just
1096 * make sure at least one signal gets delivered and don't
1097 * pass on the info struct.
1100 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1102 override_rlimit = 0;
1104 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1106 list_add_tail(&q->list, &pending->list);
1107 switch ((unsigned long) info) {
1108 case (unsigned long) SEND_SIG_NOINFO:
1109 clear_siginfo(&q->info);
1110 q->info.si_signo = sig;
1111 q->info.si_errno = 0;
1112 q->info.si_code = SI_USER;
1113 q->info.si_pid = task_tgid_nr_ns(current,
1114 task_active_pid_ns(t));
1117 from_kuid_munged(task_cred_xxx(t, user_ns),
1121 case (unsigned long) SEND_SIG_PRIV:
1122 clear_siginfo(&q->info);
1123 q->info.si_signo = sig;
1124 q->info.si_errno = 0;
1125 q->info.si_code = SI_KERNEL;
1130 copy_siginfo(&q->info, info);
1133 } else if (!is_si_special(info) &&
1134 sig >= SIGRTMIN && info->si_code != SI_USER) {
1136 * Queue overflow, abort. We may abort if the
1137 * signal was rt and sent by user using something
1138 * other than kill().
1140 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1145 * This is a silent loss of information. We still
1146 * send the signal, but the *info bits are lost.
1148 result = TRACE_SIGNAL_LOSE_INFO;
1152 signalfd_notify(t, sig);
1153 sigaddset(&pending->signal, sig);
1155 /* Let multiprocess signals appear after on-going forks */
1156 if (type > PIDTYPE_TGID) {
1157 struct multiprocess_signals *delayed;
1158 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1159 sigset_t *signal = &delayed->signal;
1160 /* Can't queue both a stop and a continue signal */
1162 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1163 else if (sig_kernel_stop(sig))
1164 sigdelset(signal, SIGCONT);
1165 sigaddset(signal, sig);
1169 complete_signal(sig, t, type);
1171 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1175 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1178 switch (siginfo_layout(info->si_signo, info->si_code)) {
1187 case SIL_FAULT_MCEERR:
1188 case SIL_FAULT_BNDERR:
1189 case SIL_FAULT_PKUERR:
1197 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1200 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1203 if (info == SEND_SIG_NOINFO) {
1204 /* Force if sent from an ancestor pid namespace */
1205 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1206 } else if (info == SEND_SIG_PRIV) {
1207 /* Don't ignore kernel generated signals */
1209 } else if (has_si_pid_and_uid(info)) {
1210 /* SIGKILL and SIGSTOP are special, or the siginfo carries pid/uid ids */
1211 struct user_namespace *t_user_ns;
1214 t_user_ns = task_cred_xxx(t, user_ns);
1215 if (current_user_ns() != t_user_ns) {
1216 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1217 info->si_uid = from_kuid_munged(t_user_ns, uid);
1221 /* A kernel generated signal? */
1222 force = (info->si_code == SI_KERNEL);
1224 /* From an ancestor pid namespace? */
1225 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1230 return __send_signal(sig, info, t, type, force);
1233 static void print_fatal_signal(int signr)
1235 struct pt_regs *regs = signal_pt_regs();
1236 pr_info("potentially unexpected fatal signal %d.\n", signr);
1238 #if defined(__i386__) && !defined(__arch_um__)
1239 pr_info("code at %08lx: ", regs->ip);
1242 for (i = 0; i < 16; i++) {
1245 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1247 pr_cont("%02x ", insn);
1257 static int __init setup_print_fatal_signals(char *str)
1259 get_option (&str, &print_fatal_signals);
1264 __setup("print-fatal-signals=", setup_print_fatal_signals);
1267 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1269 return send_signal(sig, info, p, PIDTYPE_TGID);
1272 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1275 unsigned long flags;
1278 if (lock_task_sighand(p, &flags)) {
1279 ret = send_signal(sig, info, p, type);
1280 unlock_task_sighand(p, &flags);
1287 * Force a signal that the process can't ignore: if necessary
1288 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1290 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1291 * since we do not want to have a signal handler that was blocked
1292 * be invoked when user space had explicitly blocked it.
1294 * We don't want to have recursive SIGSEGV's etc, for example,
1295 * that is why we also clear SIGNAL_UNKILLABLE.
1298 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1300 unsigned long int flags;
1301 int ret, blocked, ignored;
1302 struct k_sigaction *action;
1303 int sig = info->si_signo;
1305 spin_lock_irqsave(&t->sighand->siglock, flags);
1306 action = &t->sighand->action[sig-1];
1307 ignored = action->sa.sa_handler == SIG_IGN;
1308 blocked = sigismember(&t->blocked, sig);
1309 if (blocked || ignored) {
1310 action->sa.sa_handler = SIG_DFL;
1312 sigdelset(&t->blocked, sig);
1313 recalc_sigpending_and_wake(t);
1317 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1318 * debugging to leave init killable.
1320 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1321 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1322 ret = send_signal(sig, info, t, PIDTYPE_PID);
1323 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1328 int force_sig_info(struct kernel_siginfo *info)
1330 return force_sig_info_to_task(info, current);
1334 * Nuke all other threads in the group.
1336 int zap_other_threads(struct task_struct *p)
1338 struct task_struct *t = p;
1341 p->signal->group_stop_count = 0;
1343 while_each_thread(p, t) {
1344 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1347 /* Don't bother with already dead threads */
1350 sigaddset(&t->pending.signal, SIGKILL);
1351 signal_wake_up(t, 1);
1357 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1358 unsigned long *flags)
1360 struct sighand_struct *sighand;
1364 sighand = rcu_dereference(tsk->sighand);
1365 if (unlikely(sighand == NULL))
1369 * This sighand can be already freed and even reused, but
1370 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1371 * initializes ->siglock: this slab can't go away, it has
1372 * the same object type, ->siglock can't be reinitialized.
1374 * We need to ensure that tsk->sighand is still the same
1375 * after we take the lock, we can race with de_thread() or
1376 * __exit_signal(). In the latter case the next iteration
1377 * must see ->sighand == NULL.
1379 spin_lock_irqsave(&sighand->siglock, *flags);
1380 if (likely(sighand == tsk->sighand))
1382 spin_unlock_irqrestore(&sighand->siglock, *flags);
1390 * send signal info to all the members of a group
1392 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1393 struct task_struct *p, enum pid_type type)
1398 ret = check_kill_permission(sig, info, p);
1402 ret = do_send_sig_info(sig, info, p, type);
1408 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1409 * control characters do (^C, ^Z etc)
1410 * - the caller must hold at least a readlock on tasklist_lock
1412 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1414 struct task_struct *p = NULL;
1415 int retval, success;
1419 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1420 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1423 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1424 return success ? 0 : retval;
1427 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1430 struct task_struct *p;
1434 p = pid_task(pid, PIDTYPE_PID);
1436 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1438 if (likely(!p || error != -ESRCH))
1442 * The task was unhashed in between, try again. If it
1443 * is dead, pid_task() will return NULL, if we race with
1444 * de_thread() it will find the new leader.
1449 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1453 error = kill_pid_info(sig, info, find_vpid(pid));
1458 static inline bool kill_as_cred_perm(const struct cred *cred,
1459 struct task_struct *target)
1461 const struct cred *pcred = __task_cred(target);
1463 return uid_eq(cred->euid, pcred->suid) ||
1464 uid_eq(cred->euid, pcred->uid) ||
1465 uid_eq(cred->uid, pcred->suid) ||
1466 uid_eq(cred->uid, pcred->uid);
1470 * The usb asyncio usage of siginfo is wrong. The glibc support
1471 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1472 * AKA after the generic fields:
1473 * kernel_pid_t si_pid;
1474 * kernel_uid32_t si_uid;
1475 * sigval_t si_value;
1477 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1478 * after the generic fields is:
1479 * void __user *si_addr;
1481 * This is a practical problem when there is a 64bit big endian kernel
1482 * and a 32bit userspace, as the 32bit address will be encoded in the
1483 * low 32bits of the pointer. Those low 32bits will be stored at a
1484 * higher address than a 32 bit pointer expects, so userspace will not
1485 * see the address it was expecting for its completions.
1487 * There is nothing in the encoding that can allow
1488 * copy_siginfo_to_user32 to detect this confusion of formats, so
1489 * handle this by requiring the caller of kill_pid_usb_asyncio to
1490 * notice when this situation takes place and to store the 32bit
1491 * pointer in sival_int, instead of sival_addr of the sigval_t addr argument.
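/*
 * A minimal sketch of what such a caller might do (illustrative only; the
 * variable names here are hypothetical, not taken from an in-tree caller):
 *
 *	sigval_t addr;
 *	if (target_is_32bit)
 *		addr.sival_int = (int)(unsigned long)user_ptr32;
 *	else
 *		addr.sival_ptr = user_ptr;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */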
1494 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1495 struct pid *pid, const struct cred *cred)
1497 struct kernel_siginfo info;
1498 struct task_struct *p;
1499 unsigned long flags;
1502 clear_siginfo(&info);
1503 info.si_signo = sig;
1504 info.si_errno = errno;
1505 info.si_code = SI_ASYNCIO;
1506 *((sigval_t *)&info.si_pid) = addr;
1508 if (!valid_signal(sig))
1512 p = pid_task(pid, PIDTYPE_PID);
1517 if (!kill_as_cred_perm(cred, p)) {
1521 ret = security_task_kill(p, &info, sig, cred);
1526 if (lock_task_sighand(p, &flags)) {
1527 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1528 unlock_task_sighand(p, &flags);
1536 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1539 * kill_something_info() interprets pid in interesting ways just like kill(2).
1541 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1542 * is probably wrong. Should make it like BSD or SYSV.
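/*
 * As implemented below, pid is interpreted like kill(2): pid > 0 signals
 * that single process, pid == 0 signals the caller's process group,
 * pid < -1 signals the process group -pid, and pid == -1 signals every
 * process the caller is allowed to signal except init and the caller's
 * own thread group.
 */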
1545 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1551 ret = kill_pid_info(sig, info, find_vpid(pid));
1556 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1560 read_lock(&tasklist_lock);
1562 ret = __kill_pgrp_info(sig, info,
1563 pid ? find_vpid(-pid) : task_pgrp(current));
1565 int retval = 0, count = 0;
1566 struct task_struct * p;
1568 for_each_process(p) {
1569 if (task_pid_vnr(p) > 1 &&
1570 !same_thread_group(p, current)) {
1571 int err = group_send_sig_info(sig, info, p,
1578 ret = count ? retval : -ESRCH;
1580 read_unlock(&tasklist_lock);
1586 * These are for backward compatibility with the rest of the kernel source.
1589 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1592 * Make sure legacy kernel users don't send in bad values
1593 * (normal paths check this in check_kill_permission).
1595 if (!valid_signal(sig))
1598 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1600 EXPORT_SYMBOL(send_sig_info);
1602 #define __si_special(priv) \
1603 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
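/*
 * send_sig(sig, p, 1) therefore sends a kernel-internal signal
 * (SEND_SIG_PRIV, si_code SI_KERNEL), while send_sig(sig, p, 0) is treated
 * as if it came from userspace (SEND_SIG_NOINFO, si_code SI_USER).
 */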
1606 send_sig(int sig, struct task_struct *p, int priv)
1608 return send_sig_info(sig, __si_special(priv), p);
1610 EXPORT_SYMBOL(send_sig);
1612 void force_sig(int sig)
1614 struct kernel_siginfo info;
1616 clear_siginfo(&info);
1617 info.si_signo = sig;
1619 info.si_code = SI_KERNEL;
1622 force_sig_info(&info);
1624 EXPORT_SYMBOL(force_sig);
1627 * When things go south during signal handling, we
1628 * will force a SIGSEGV. And if the signal that caused
1629 * the problem was already a SIGSEGV, we'll want to
1630 * make sure we don't even try to deliver the signal.
1632 void force_sigsegv(int sig)
1634 struct task_struct *p = current;
1636 if (sig == SIGSEGV) {
1637 unsigned long flags;
1638 spin_lock_irqsave(&p->sighand->siglock, flags);
1639 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1640 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1645 int force_sig_fault_to_task(int sig, int code, void __user *addr
1646 ___ARCH_SI_TRAPNO(int trapno)
1647 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1648 , struct task_struct *t)
1650 struct kernel_siginfo info;
1652 clear_siginfo(&info);
1653 info.si_signo = sig;
1655 info.si_code = code;
1656 info.si_addr = addr;
1657 #ifdef __ARCH_SI_TRAPNO
1658 info.si_trapno = trapno;
1662 info.si_flags = flags;
1665 return force_sig_info_to_task(&info, t);
1668 int force_sig_fault(int sig, int code, void __user *addr
1669 ___ARCH_SI_TRAPNO(int trapno)
1670 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1672 return force_sig_fault_to_task(sig, code, addr
1673 ___ARCH_SI_TRAPNO(trapno)
1674 ___ARCH_SI_IA64(imm, flags, isr), current);
1677 int send_sig_fault(int sig, int code, void __user *addr
1678 ___ARCH_SI_TRAPNO(int trapno)
1679 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1680 , struct task_struct *t)
1682 struct kernel_siginfo info;
1684 clear_siginfo(&info);
1685 info.si_signo = sig;
1687 info.si_code = code;
1688 info.si_addr = addr;
1689 #ifdef __ARCH_SI_TRAPNO
1690 info.si_trapno = trapno;
1694 info.si_flags = flags;
1697 return send_sig_info(info.si_signo, &info, t);
1700 int force_sig_mceerr(int code, void __user *addr, short lsb)
1702 struct kernel_siginfo info;
1704 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1705 clear_siginfo(&info);
1706 info.si_signo = SIGBUS;
1708 info.si_code = code;
1709 info.si_addr = addr;
1710 info.si_addr_lsb = lsb;
1711 return force_sig_info(&info);
1714 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1716 struct kernel_siginfo info;
1718 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1719 clear_siginfo(&info);
1720 info.si_signo = SIGBUS;
1722 info.si_code = code;
1723 info.si_addr = addr;
1724 info.si_addr_lsb = lsb;
1725 return send_sig_info(info.si_signo, &info, t);
1727 EXPORT_SYMBOL(send_sig_mceerr);
1729 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1731 struct kernel_siginfo info;
1733 clear_siginfo(&info);
1734 info.si_signo = SIGSEGV;
1736 info.si_code = SEGV_BNDERR;
1737 info.si_addr = addr;
1738 info.si_lower = lower;
1739 info.si_upper = upper;
1740 return force_sig_info(&info);
1744 int force_sig_pkuerr(void __user *addr, u32 pkey)
1746 struct kernel_siginfo info;
1748 clear_siginfo(&info);
1749 info.si_signo = SIGSEGV;
1751 info.si_code = SEGV_PKUERR;
1752 info.si_addr = addr;
1753 info.si_pkey = pkey;
1754 return force_sig_info(&info);
1758 /* For the crazy architectures that include trap information in
1759 * the errno field, instead of an actual errno value.
1761 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1763 struct kernel_siginfo info;
1765 clear_siginfo(&info);
1766 info.si_signo = SIGTRAP;
1767 info.si_errno = errno;
1768 info.si_code = TRAP_HWBKPT;
1769 info.si_addr = addr;
1770 return force_sig_info(&info);
1773 int kill_pgrp(struct pid *pid, int sig, int priv)
1777 read_lock(&tasklist_lock);
1778 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1779 read_unlock(&tasklist_lock);
1783 EXPORT_SYMBOL(kill_pgrp);
1785 int kill_pid(struct pid *pid, int sig, int priv)
1787 return kill_pid_info(sig, __si_special(priv), pid);
1789 EXPORT_SYMBOL(kill_pid);
1792 * These functions support sending signals using preallocated sigqueue
1793 * structures. This is needed "because realtime applications cannot
1794 * afford to lose notifications of asynchronous events, like timer
1795 * expirations or I/O completions". In the case of POSIX Timers
1796 * we allocate the sigqueue structure from the timer_create. If this
1797 * allocation fails we are able to report the failure to the application
1798 * with an EAGAIN error.
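/*
 * A rough usage sketch (for illustration; the real consumer is the POSIX
 * timer code, which allocates at timer_create() time so that a failure can
 * be reported as -EAGAIN):
 *
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *	...
 *	q->info.si_signo = ...;			(fill in per-event siginfo)
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	(on each expiry)
 *	...
 *	sigqueue_free(q);			(when the timer is deleted)
 */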
1800 struct sigqueue *sigqueue_alloc(void)
1802 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1805 q->flags |= SIGQUEUE_PREALLOC;
1810 void sigqueue_free(struct sigqueue *q)
1812 unsigned long flags;
1813 spinlock_t *lock = &current->sighand->siglock;
1815 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1817 * We must hold ->siglock while testing q->list
1818 * to serialize with collect_signal() or with
1819 * __exit_signal()->flush_sigqueue().
1821 spin_lock_irqsave(lock, flags);
1822 q->flags &= ~SIGQUEUE_PREALLOC;
1824 * If it is queued it will be freed when dequeued,
1825 * like the "regular" sigqueue.
1827 if (!list_empty(&q->list))
1829 spin_unlock_irqrestore(lock, flags);
1835 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1837 int sig = q->info.si_signo;
1838 struct sigpending *pending;
1839 struct task_struct *t;
1840 unsigned long flags;
1843 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1847 t = pid_task(pid, type);
1848 if (!t || !likely(lock_task_sighand(t, &flags)))
1851 ret = 1; /* the signal is ignored */
1852 result = TRACE_SIGNAL_IGNORED;
1853 if (!prepare_signal(sig, t, false))
1857 if (unlikely(!list_empty(&q->list))) {
1859 * If an SI_TIMER entry is already queued, just increment
1860 * the overrun count.
1862 BUG_ON(q->info.si_code != SI_TIMER);
1863 q->info.si_overrun++;
1864 result = TRACE_SIGNAL_ALREADY_PENDING;
1867 q->info.si_overrun = 0;
1869 signalfd_notify(t, sig);
1870 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1871 list_add_tail(&q->list, &pending->list);
1872 sigaddset(&pending->signal, sig);
1873 complete_signal(sig, t, type);
1874 result = TRACE_SIGNAL_DELIVERED;
1876 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1877 unlock_task_sighand(t, &flags);
1884 * Let a parent know about the death of a child.
1885 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1887 * Returns true if our parent ignored us and so we've switched to self-reaping.
1890 bool do_notify_parent(struct task_struct *tsk, int sig)
1892 struct kernel_siginfo info;
1893 unsigned long flags;
1894 struct sighand_struct *psig;
1895 bool autoreap = false;
1900 /* do_notify_parent_cldstop should have been called instead. */
1901 BUG_ON(task_is_stopped_or_traced(tsk));
1903 BUG_ON(!tsk->ptrace &&
1904 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1906 if (sig != SIGCHLD) {
1908 * This is only possible if parent == real_parent.
1909 * Check if it has changed security domain.
1911 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1915 clear_siginfo(&info);
1916 info.si_signo = sig;
1919 * We are under tasklist_lock here so our parent is tied to
1920 * us and cannot change.
1922 * task_active_pid_ns will always return the same pid namespace
1923 * until a task passes through release_task.
1925 * write_lock() currently calls preempt_disable() which is the
1926 * same as rcu_read_lock(), but according to Oleg, it is not
1927 * correct to rely on this.
1930 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1931 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1935 task_cputime(tsk, &utime, &stime);
1936 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1937 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1939 info.si_status = tsk->exit_code & 0x7f;
1940 if (tsk->exit_code & 0x80)
1941 info.si_code = CLD_DUMPED;
1942 else if (tsk->exit_code & 0x7f)
1943 info.si_code = CLD_KILLED;
1945 info.si_code = CLD_EXITED;
1946 info.si_status = tsk->exit_code >> 8;
1949 psig = tsk->parent->sighand;
1950 spin_lock_irqsave(&psig->siglock, flags);
1951 if (!tsk->ptrace && sig == SIGCHLD &&
1952 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1953 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1955 * We are exiting and our parent doesn't care. POSIX.1
1956 * defines special semantics for setting SIGCHLD to SIG_IGN
1957 * or setting the SA_NOCLDWAIT flag: we should be reaped
1958 * automatically and not left for our parent's wait4 call.
1959 * Rather than having the parent do it as a magic kind of
1960 * signal handler, we just set this to tell do_exit that we
1961 * can be cleaned up without becoming a zombie. Note that
1962 * we still call __wake_up_parent in this case, because a
1963 * blocked sys_wait4 might now return -ECHILD.
1965 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1966 * is implementation-defined: we do (if you don't want
1967 * it, just use SIG_IGN instead).
1970 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1973 if (valid_signal(sig) && sig)
1974 __group_send_sig_info(sig, &info, tsk->parent);
1975 __wake_up_parent(tsk, tsk->parent);
1976 spin_unlock_irqrestore(&psig->siglock, flags);
1982 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1983 * @tsk: task reporting the state change
1984 * @for_ptracer: the notification is for ptracer
1985 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1987 * Notify @tsk's parent that the stopped/continued state has changed. If
1988 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1989 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1992 * Must be called with tasklist_lock at least read locked.
1994 static void do_notify_parent_cldstop(struct task_struct *tsk,
1995 bool for_ptracer, int why)
1997 struct kernel_siginfo info;
1998 unsigned long flags;
1999 struct task_struct *parent;
2000 struct sighand_struct *sighand;
2004 parent = tsk->parent;
2006 tsk = tsk->group_leader;
2007 parent = tsk->real_parent;
2010 clear_siginfo(&info);
2011 info.si_signo = SIGCHLD;
2014 * see comment in do_notify_parent() about the following 4 lines
2017 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2018 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2021 task_cputime(tsk, &utime, &stime);
2022 info.si_utime = nsec_to_clock_t(utime);
2023 info.si_stime = nsec_to_clock_t(stime);
2028 info.si_status = SIGCONT;
2031 info.si_status = tsk->signal->group_exit_code & 0x7f;
2034 info.si_status = tsk->exit_code & 0x7f;
2040 sighand = parent->sighand;
2041 spin_lock_irqsave(&sighand->siglock, flags);
2042 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2043 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2044 __group_send_sig_info(SIGCHLD, &info, parent);
2046 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2048 __wake_up_parent(tsk, parent);
2049 spin_unlock_irqrestore(&sighand->siglock, flags);
2052 static inline bool may_ptrace_stop(void)
2054 if (!likely(current->ptrace))
2057 * Are we in the middle of do_coredump?
2058 * If so and our tracer is also part of the coredump, stopping is a
2059 * deadlock situation, and pointless because our tracer is dead, so
2060 * don't allow us to stop.
2061 * If SIGKILL was already sent before the caller unlocked
2062 * ->siglock we must see ->core_state != NULL. Otherwise it
2063 * is safe to enter schedule().
2065 * This is almost outdated: a task with a pending SIGKILL can't
2066 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2067 * after SIGKILL was already dequeued.
2069 if (unlikely(current->mm->core_state) &&
2070 unlikely(current->mm == current->parent->mm))
2077 * Return non-zero if there is a SIGKILL that should be waking us up.
2078 * Called with the siglock held.
2080 static bool sigkill_pending(struct task_struct *tsk)
2082 return sigismember(&tsk->pending.signal, SIGKILL) ||
2083 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2087 * This must be called with current->sighand->siglock held.
2089 * This should be the path for all ptrace stops.
2090 * We always set current->last_siginfo while stopped here.
2091 * That makes it a way to test a stopped process for
2092 * being ptrace-stopped vs being job-control-stopped.
2094 * If we actually decide not to stop at all because the tracer
2095 * is gone, we keep current->exit_code unless clear_code.
2097 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2098 __releases(&current->sighand->siglock)
2099 __acquires(&current->sighand->siglock)
2101 bool gstop_done = false;
2103 if (arch_ptrace_stop_needed(exit_code, info)) {
2105 * The arch code has something special to do before a
2106 * ptrace stop. This is allowed to block, e.g. for faults
2107 * on user stack pages. We can't keep the siglock while
2108 * calling arch_ptrace_stop, so we must release it now.
2109 * To preserve proper semantics, we must do this before
2110 * any signal bookkeeping like checking group_stop_count.
2111 * Meanwhile, a SIGKILL could come in before we retake the
2112 * siglock. That must prevent us from sleeping in TASK_TRACED.
2113 * So after regaining the lock, we must check for SIGKILL.
2115 spin_unlock_irq(&current->sighand->siglock);
2116 arch_ptrace_stop(exit_code, info);
2117 spin_lock_irq(&current->sighand->siglock);
2118 if (sigkill_pending(current))
2122 set_special_state(TASK_TRACED);
2125 * We're committing to trapping. TRACED should be visible before
2126 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2127 * Also, transition to TRACED and updates to ->jobctl should be
2128 * atomic with respect to siglock and should be done after the arch
2129 * hook as siglock is released and regrabbed across it.
2134 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2136 * set_current_state() smp_wmb();
2138 * wait_task_stopped()
2139 * task_stopped_code()
2140 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2144 current->last_siginfo = info;
2145 current->exit_code = exit_code;
2148 * If @why is CLD_STOPPED, we're trapping to participate in a group
2149 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2150 * across siglock relocks since INTERRUPT was scheduled, PENDING
2151 * could be clear now. We act as if SIGCONT is received after
2152 * TASK_TRACED is entered - ignore it.
2154 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2155 gstop_done = task_participate_group_stop(current);
2157 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2158 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2159 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2160 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2162 /* entering a trap, clear TRAPPING */
2163 task_clear_jobctl_trapping(current);
2165 spin_unlock_irq(&current->sighand->siglock);
2166 read_lock(&tasklist_lock);
2167 if (may_ptrace_stop()) {
2169 * Notify parents of the stop.
2171 * While ptraced, there are two parents - the ptracer and
2172 * the real_parent of the group_leader. The ptracer should
2173 * know about every stop while the real parent is only
2174 * interested in the completion of group stop. The states
2175 * for the two don't interact with each other. Notify
2176 * separately unless they're gonna be duplicates.
2178 do_notify_parent_cldstop(current, true, why);
2179 if (gstop_done && ptrace_reparented(current))
2180 do_notify_parent_cldstop(current, false, why);
2183 * Don't want to allow preemption here, because
2184 * sys_ptrace() needs this task to be inactive.
2186 * XXX: implement read_unlock_no_resched().
2189 read_unlock(&tasklist_lock);
2190 preempt_enable_no_resched();
2191 cgroup_enter_frozen();
2192 freezable_schedule();
2193 cgroup_leave_frozen(true);
2196 * By the time we got the lock, our tracer went away.
2197 * Don't drop the lock yet, another tracer may come.
2199 * If @gstop_done, the ptracer went away between group stop
2200 * completion and here. During detach, it would have set
2201 * JOBCTL_STOP_PENDING on us and we'll re-enter
2202 * TASK_STOPPED in do_signal_stop() on return, so notifying
2203 * the real parent of the group stop completion is enough.
2206 do_notify_parent_cldstop(current, false, why);
2208 /* tasklist protects us from ptrace_freeze_traced() */
2209 __set_current_state(TASK_RUNNING);
2211 current->exit_code = 0;
2212 read_unlock(&tasklist_lock);
2216 * We are back. Now reacquire the siglock before touching
2217 * last_siginfo, so that we are sure to have synchronized with
2218 * any signal-sending on another CPU that wants to examine it.
2220 spin_lock_irq(&current->sighand->siglock);
2221 current->last_siginfo = NULL;
2223 /* LISTENING can be set only during STOP traps, clear it */
2224 current->jobctl &= ~JOBCTL_LISTENING;
2227 * Queued signals ignored us while we were stopped for tracing.
2228 * So check for any that we should take before resuming user mode.
2229 * This sets TIF_SIGPENDING, but never clears it.
2231 recalc_sigpending_tsk(current);
2234 static void ptrace_do_notify(int signr, int exit_code, int why)
2236 kernel_siginfo_t info;
2238 clear_siginfo(&info);
2239 info.si_signo = signr;
2240 info.si_code = exit_code;
2241 info.si_pid = task_pid_vnr(current);
2242 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2244 /* Let the debugger run. */
2245 ptrace_stop(exit_code, why, 1, &info);
2248 void ptrace_notify(int exit_code)
2250 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2251 if (unlikely(current->task_works))
2254 spin_lock_irq(&current->sighand->siglock);
2255 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2256 spin_unlock_irq(&current->sighand->siglock);
2260 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2261 * @signr: signr causing group stop if initiating
2263 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2264 * and participate in it. If already set, participate in the existing
2265 * group stop. If participated in a group stop (and thus slept), %true is
2266 * returned with siglock released.
2268 * If ptraced, this function doesn't handle stop itself. Instead,
2269 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2270 * untouched. The caller must ensure that INTERRUPT trap handling takes
2271 * places afterwards.
2274 * Must be called with @current->sighand->siglock held, which is released on %true return.
2278 * %false if group stop is already cancelled or ptrace trap is scheduled.
2279 * %true if participated in group stop.
2281 static bool do_signal_stop(int signr)
2282 __releases(&current->sighand->siglock)
2284 struct signal_struct *sig = current->signal;
2286 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2287 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2288 struct task_struct *t;
2290 /* signr will be recorded in task->jobctl for retries */
2291 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2293 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2294 unlikely(signal_group_exit(sig)))
2297 * There is no group stop already in progress. We must
2300 * While ptraced, a task may be resumed while group stop is
2301 * still in effect and then receive a stop signal and
2302 * initiate another group stop. This deviates from the
2303 * usual behavior as two consecutive stop signals can't
2304 * cause two group stops when !ptraced. That is why we
2305 * also check !task_is_stopped(t) below.
2307 * The condition can be distinguished by testing whether
2308 * SIGNAL_STOP_STOPPED is already set. Don't generate
2309 * group_exit_code in such case.
2311 * This is not necessary for SIGNAL_STOP_CONTINUED because
2312 * an intervening stop signal is required to cause two
2313 * continued events regardless of ptrace.
2315 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2316 sig->group_exit_code = signr;
2318 sig->group_stop_count = 0;
2320 if (task_set_jobctl_pending(current, signr | gstop))
2321 sig->group_stop_count++;
2324 while_each_thread(current, t) {
2326 * Setting state to TASK_STOPPED for a group
2327 * stop is always done with the siglock held,
2328 * so this check has no races.
2330 if (!task_is_stopped(t) &&
2331 task_set_jobctl_pending(t, signr | gstop)) {
2332 sig->group_stop_count++;
2333 if (likely(!(t->ptrace & PT_SEIZED)))
2334 signal_wake_up(t, 0);
2336 ptrace_trap_notify(t);
2341 if (likely(!current->ptrace)) {
2345 * If there are no other threads in the group, or if there
2346 * is a group stop in progress and we are the last to stop,
2347 * report to the parent.
2349 if (task_participate_group_stop(current))
2350 notify = CLD_STOPPED;
2352 set_special_state(TASK_STOPPED);
2353 spin_unlock_irq(&current->sighand->siglock);
2356 * Notify the parent of the group stop completion. Because
2357 * we're not holding either the siglock or tasklist_lock
2358 * here, ptracer may attach in between; however, this is for
2359 * group stop and should always be delivered to the real
2360 * parent of the group leader. The new ptracer will get
2361 * its notification when this task transitions into TASK_TRACED.
2365 read_lock(&tasklist_lock);
2366 do_notify_parent_cldstop(current, false, notify);
2367 read_unlock(&tasklist_lock);
2370 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2371 cgroup_enter_frozen();
2372 freezable_schedule();
2376 * While ptraced, group stop is handled by STOP trap.
2377 * Schedule it and let the caller deal with it.
2379 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2385 * do_jobctl_trap - take care of ptrace jobctl traps
2387 * When PT_SEIZED, it's used for both group stop and explicit
2388 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2389 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2390 * the stop signal; otherwise, %SIGTRAP.
2392 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2393 * number as exit_code and no siginfo.
2396 * Must be called with @current->sighand->siglock held, which may be
2397 * released and re-acquired before returning with intervening sleep.
2399 static void do_jobctl_trap(void)
2401 struct signal_struct *signal = current->signal;
2402 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2404 if (current->ptrace & PT_SEIZED) {
2405 if (!signal->group_stop_count &&
2406 !(signal->flags & SIGNAL_STOP_STOPPED))
2408 WARN_ON_ONCE(!signr);
2409 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2412 WARN_ON_ONCE(!signr);
2413 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2414 current->exit_code = 0;
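/*
 * Usage sketch (not part of this file): how the PT_SEIZED group-stop trap
 * described above is observed by a userspace tracer. A minimal sketch,
 * assuming the ptrace(2) and waitpid(2) interfaces and that PTRACE_EVENT_STOP
 * is exposed by the ptrace headers; "child" is an illustrative pid of an
 * already-running tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void watch_group_stop(pid_t child)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_SEIZE, child, 0, 0);	// attach without stopping
 *		kill(child, SIGSTOP);			// trigger a group stop
 *		waitpid(child, &status, 0);
 *		if (WIFSTOPPED(status) && (status >> 16) == PTRACE_EVENT_STOP)
 *			printf("group-stop, signal %d\n", WSTOPSIG(status));
 *		ptrace(PTRACE_LISTEN, child, 0, 0);	// stay stopped, keep listening
 *	}
 */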
2419 * do_freezer_trap - handle the freezer jobctl trap
2421 * Puts the task into the frozen state, unless the task is about to quit;
2422 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2425 * Must be called with @current->sighand->siglock held,
2426 * which is always released before returning.
2428 static void do_freezer_trap(void)
2429 __releases(&current->sighand->siglock)
2432 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2433 * let's make another loop to give them a chance to be handled.
2434 * In any case, we'll return.
2436 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2437 JOBCTL_TRAP_FREEZE) {
2438 spin_unlock_irq(&current->sighand->siglock);
2443 * Now we're sure that there is no pending fatal signal and no
2444 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2445 * immediately (if there is a non-fatal signal pending), and
2446 * put the task into sleep.
2448 __set_current_state(TASK_INTERRUPTIBLE);
2449 clear_thread_flag(TIF_SIGPENDING);
2450 spin_unlock_irq(&current->sighand->siglock);
2451 cgroup_enter_frozen();
2452 freezable_schedule();
2455 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2458 * We do not check sig_kernel_stop(signr) but set this marker
2459 * unconditionally because we do not know whether debugger will
2460 * change signr. This flag has no meaning unless we are going
2461 * to stop after return from ptrace_stop(). In this case it will
2462 * be checked in do_signal_stop(), we should only stop if it was
2463 * not cleared by SIGCONT while we were sleeping. See also the
2464 * comment in dequeue_signal().
2466 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2467 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2469 /* We're back. Did the debugger cancel the sig? */
2470 signr = current->exit_code;
2474 current->exit_code = 0;
2477 * Update the siginfo structure if the signal has
2478 * changed. If the debugger wanted something
2479 * specific in the siginfo structure then it should
2480 * have updated *info via PTRACE_SETSIGINFO.
2482 if (signr != info->si_signo) {
2483 clear_siginfo(info);
2484 info->si_signo = signr;
2486 info->si_code = SI_USER;
2488 info->si_pid = task_pid_vnr(current->parent);
2489 info->si_uid = from_kuid_munged(current_user_ns(),
2490 task_uid(current->parent));
2494 /* If the (new) signal is now blocked, requeue it. */
2495 if (sigismember(&current->blocked, signr)) {
2496 send_signal(signr, info, current, PIDTYPE_PID);
2503 bool get_signal(struct ksignal *ksig)
2505 struct sighand_struct *sighand = current->sighand;
2506 struct signal_struct *signal = current->signal;
2509 if (unlikely(current->task_works))
2512 if (unlikely(uprobe_deny_signal()))
2516 * Do this once, we can't return to user-mode if freezing() == T.
2517 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2518 * thus do not need another check after return.
2523 spin_lock_irq(&sighand->siglock);
2525 * Every stopped thread goes here after wakeup. Check to see if
2526 * we should notify the parent, prepare_signal(SIGCONT) encodes
2527 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2529 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2532 if (signal->flags & SIGNAL_CLD_CONTINUED)
2533 why = CLD_CONTINUED;
2537 signal->flags &= ~SIGNAL_CLD_MASK;
2539 spin_unlock_irq(&sighand->siglock);
2542 * Notify the parent that we're continuing. This event is
2543 * always per-process and doesn't make a whole lot of sense
2544 * for ptracers, who shouldn't consume the state via
2545 * wait(2) either, but, for backward compatibility, notify
2546 * the ptracer of the group leader too unless it's gonna be a duplicate.
2549 read_lock(&tasklist_lock);
2550 do_notify_parent_cldstop(current, false, why);
2552 if (ptrace_reparented(current->group_leader))
2553 do_notify_parent_cldstop(current->group_leader,
2555 read_unlock(&tasklist_lock);
2560 /* Has this task already been marked for death? */
2561 if (signal_group_exit(signal)) {
2562 ksig->info.si_signo = signr = SIGKILL;
2563 sigdelset(&current->pending.signal, SIGKILL);
2564 recalc_sigpending();
2569 struct k_sigaction *ka;
2571 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2575 if (unlikely(current->jobctl &
2576 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2577 if (current->jobctl & JOBCTL_TRAP_MASK) {
2579 spin_unlock_irq(&sighand->siglock);
2580 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2587 * If the task is leaving the frozen state, let's update
2588 * cgroup counters and reset the frozen bit.
2590 if (unlikely(cgroup_task_frozen(current))) {
2591 spin_unlock_irq(&sighand->siglock);
2592 cgroup_leave_frozen(false);
2597 * Signals generated by the execution of an instruction
2598 * need to be delivered before any other pending signals
2599 * so that the instruction pointer in the signal stack
2600 * frame points to the faulting instruction.
2602 signr = dequeue_synchronous_signal(&ksig->info);
2604 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2607 break; /* will return 0 */
2609 if (unlikely(current->ptrace) && signr != SIGKILL) {
2610 signr = ptrace_signal(signr, &ksig->info);
2615 ka = &sighand->action[signr-1];
2617 /* Trace actually delivered signals. */
2618 trace_signal_deliver(signr, &ksig->info, ka);
2620 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2622 if (ka->sa.sa_handler != SIG_DFL) {
2623 /* Run the handler. */
2626 if (ka->sa.sa_flags & SA_ONESHOT)
2627 ka->sa.sa_handler = SIG_DFL;
2629 break; /* will return non-zero "signr" value */
2633 * Now we are doing the default action for this signal.
2635 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2639 * Global init gets no signals it doesn't want.
2640 * Container-init gets no signals it doesn't want from the same container.
2643 * Note that if global/container-init sees a sig_kernel_only()
2644 * signal here, the signal must have been generated internally
2645 * or must have come from an ancestor namespace. In either
2646 * case, the signal cannot be dropped.
2648 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2649 !sig_kernel_only(signr))
2652 if (sig_kernel_stop(signr)) {
2654 * The default action is to stop all threads in
2655 * the thread group. The job control signals
2656 * do nothing in an orphaned pgrp, but SIGSTOP
2657 * always works. Note that siglock needs to be
2658 * dropped during the call to is_orphaned_pgrp()
2659 * because of lock ordering with tasklist_lock.
2660 * This allows an intervening SIGCONT to be posted.
2661 * We need to check for that and bail out if necessary.
2663 if (signr != SIGSTOP) {
2664 spin_unlock_irq(&sighand->siglock);
2666 /* signals can be posted during this window */
2668 if (is_current_pgrp_orphaned())
2671 spin_lock_irq(&sighand->siglock);
2674 if (likely(do_signal_stop(ksig->info.si_signo))) {
2675 /* It released the siglock. */
2680 * We didn't actually stop, due to a race
2681 * with SIGCONT or something like that.
2687 spin_unlock_irq(&sighand->siglock);
2688 if (unlikely(cgroup_task_frozen(current)))
2689 cgroup_leave_frozen(true);
2692 * Anything else is fatal, maybe with a core dump.
2694 current->flags |= PF_SIGNALED;
2696 if (sig_kernel_coredump(signr)) {
2697 if (print_fatal_signals)
2698 print_fatal_signal(ksig->info.si_signo);
2699 proc_coredump_connector(current);
2701 * If it was able to dump core, this kills all
2702 * other threads in the group and synchronizes with
2703 * their demise. If we lost the race with another
2704 * thread getting here, it set group_exit_code
2705 * first and our do_group_exit call below will use
2706 * that value and ignore the one we pass it.
2708 do_coredump(&ksig->info);
2712 * Death signals, no core dump.
2714 do_group_exit(ksig->info.si_signo);
2717 spin_unlock_irq(&sighand->siglock);
2720 return ksig->sig > 0;
2724 * signal_delivered - finish bookkeeping after a signal has been delivered
2725 * @ksig: kernel signal struct
2726 * @stepping: nonzero if debugger single-step or block-step in use
2728 * This function should be called when a signal has successfully been
2729 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2730 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2731 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2733 static void signal_delivered(struct ksignal *ksig, int stepping)
2737 /* A signal was successfully delivered, and the
2738 saved sigmask was stored on the signal frame,
2739 and will be restored by sigreturn. So we can
2740 simply clear the restore sigmask flag. */
2741 clear_restore_sigmask();
2743 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2744 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2745 sigaddset(&blocked, ksig->sig);
2746 set_current_blocked(&blocked);
2747 tracehook_signal_handler(stepping);
2750 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2753 force_sigsegv(ksig->sig);
2755 signal_delivered(ksig, stepping);
2759 * It could be that complete_signal() picked us to notify about the
2760 * group-wide signal. Other threads should be notified now to take
2761 * the shared signals in @which since we will not.
2763 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2766 struct task_struct *t;
2768 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2769 if (sigisemptyset(&retarget))
2773 while_each_thread(tsk, t) {
2774 if (t->flags & PF_EXITING)
2777 if (!has_pending_signals(&retarget, &t->blocked))
2779 /* Remove the signals this thread can handle. */
2780 sigandsets(&retarget, &retarget, &t->blocked);
2782 if (!signal_pending(t))
2783 signal_wake_up(t, 0);
2785 if (sigisemptyset(&retarget))
2790 void exit_signals(struct task_struct *tsk)
2796 * @tsk is about to have PF_EXITING set - lock out users which
2797 * expect stable threadgroup.
2799 cgroup_threadgroup_change_begin(tsk);
2801 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2802 tsk->flags |= PF_EXITING;
2803 cgroup_threadgroup_change_end(tsk);
2807 spin_lock_irq(&tsk->sighand->siglock);
2809 * From now this task is not visible for group-wide signals,
2810 * see wants_signal(), do_signal_stop().
2812 tsk->flags |= PF_EXITING;
2814 cgroup_threadgroup_change_end(tsk);
2816 if (!signal_pending(tsk))
2819 unblocked = tsk->blocked;
2820 signotset(&unblocked);
2821 retarget_shared_pending(tsk, &unblocked);
2823 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2824 task_participate_group_stop(tsk))
2825 group_stop = CLD_STOPPED;
2827 spin_unlock_irq(&tsk->sighand->siglock);
2830 * If group stop has completed, deliver the notification. This
2831 * should always go to the real parent of the group leader.
2833 if (unlikely(group_stop)) {
2834 read_lock(&tasklist_lock);
2835 do_notify_parent_cldstop(tsk, false, group_stop);
2836 read_unlock(&tasklist_lock);
2841 * System call entry points.
2845 * sys_restart_syscall - restart a system call
2847 SYSCALL_DEFINE0(restart_syscall)
2849 struct restart_block *restart = &current->restart_block;
2850 return restart->fn(restart);
2853 long do_no_restart_syscall(struct restart_block *param)
2858 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2860 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2861 sigset_t newblocked;
2862 /* A set of now blocked but previously unblocked signals. */
2863 sigandnsets(&newblocked, newset, &current->blocked);
2864 retarget_shared_pending(tsk, &newblocked);
2866 tsk->blocked = *newset;
2867 recalc_sigpending();
2871 * set_current_blocked - change current->blocked mask
2874 * It is wrong to change ->blocked directly, this helper should be used
2875 * to ensure the process can't miss a shared signal we are going to block.
2877 void set_current_blocked(sigset_t *newset)
2879 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2880 __set_current_blocked(newset);
2883 void __set_current_blocked(const sigset_t *newset)
2885 struct task_struct *tsk = current;
2888 * In case the signal mask hasn't changed, there is nothing we need
2889 * to do. The current->blocked shouldn't be modified by other task.
2891 if (sigequalsets(&tsk->blocked, newset))
2894 spin_lock_irq(&tsk->sighand->siglock);
2895 __set_task_blocked(tsk, newset);
2896 spin_unlock_irq(&tsk->sighand->siglock);
2900 * This is also useful for kernel threads that want to temporarily
2901 * (or permanently) block certain signals.
2903 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2904 * interface happily blocks "unblockable" signals like SIGKILL and SIGSTOP.
2907 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2909 struct task_struct *tsk = current;
2912 /* Lockless, only current can change ->blocked, never from irq */
2914 *oldset = tsk->blocked;
2918 sigorsets(&newset, &tsk->blocked, set);
2921 sigandnsets(&newset, &tsk->blocked, set);
2930 __set_current_blocked(&newset);
2933 EXPORT_SYMBOL(sigprocmask);
2936 * This API helps set app-provided sigmasks.
2938 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2939 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2941 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2942 sigset_t *oldset, size_t sigsetsize)
2947 if (sigsetsize != sizeof(sigset_t))
2949 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2952 *oldset = current->blocked;
2953 set_current_blocked(set);
2957 EXPORT_SYMBOL(set_user_sigmask);
2959 #ifdef CONFIG_COMPAT
2960 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2961 sigset_t *set, sigset_t *oldset,
2967 if (sigsetsize != sizeof(compat_sigset_t))
2969 if (get_compat_sigset(set, usigmask))
2972 *oldset = current->blocked;
2973 set_current_blocked(set);
2977 EXPORT_SYMBOL(set_compat_user_sigmask);
2981 * restore_user_sigmask:
2982 * usigmask: sigmask passed in from userland.
2983 * sigsaved: saved sigmask when the syscall started and changed the sigmask to usigmask.
2986 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2987 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2989 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2995 * When signals are pending, do not restore them here.
2996 * Restoring sigmask here can lead to delivering signals that the above
2997 * syscalls are intended to block because of the sigmask passed in.
2999 if (signal_pending(current)) {
3000 current->saved_sigmask = *sigsaved;
3001 set_restore_sigmask();
3006 * This is needed because the fast syscall return path does not restore
3007 * saved_sigmask when signals are not pending.
3009 set_current_blocked(sigsaved);
3011 EXPORT_SYMBOL(restore_user_sigmask);
3014 * sys_rt_sigprocmask - change the list of currently blocked signals
3015 * @how: whether to add, remove, or set signals
3016 * @nset: signals to add, remove, or set (if non-null)
3017 * @oset: previous value of signal mask if non-null
3018 * @sigsetsize: size of sigset_t type
3020 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3021 sigset_t __user *, oset, size_t, sigsetsize)
3023 sigset_t old_set, new_set;
3026 /* XXX: Don't preclude handling different sized sigset_t's. */
3027 if (sigsetsize != sizeof(sigset_t))
3030 old_set = current->blocked;
3033 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3035 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3037 error = sigprocmask(how, &new_set, NULL);
3043 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
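/*
 * Usage sketch (not part of this file): userspace reaches this syscall
 * through the sigprocmask() wrapper. A minimal sketch, assuming the POSIX
 * <signal.h> API; the critical-region comment marks illustrative work.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &set, &old) == -1) {
 *			perror("sigprocmask");
 *			return 1;
 *		}
 *		// ... critical region: SIGINT stays pending here ...
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 *		return 0;
 *	}
 */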
3050 #ifdef CONFIG_COMPAT
3051 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3052 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3054 sigset_t old_set = current->blocked;
3056 /* XXX: Don't preclude handling different sized sigset_t's. */
3057 if (sigsetsize != sizeof(sigset_t))
3063 if (get_compat_sigset(&new_set, nset))
3065 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3067 error = sigprocmask(how, &new_set, NULL);
3071 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3075 static void do_sigpending(sigset_t *set)
3077 spin_lock_irq(&current->sighand->siglock);
3078 sigorsets(set, &current->pending.signal,
3079 &current->signal->shared_pending.signal);
3080 spin_unlock_irq(&current->sighand->siglock);
3082 /* Outside the lock because only this thread touches it. */
3083 sigandsets(set, &current->blocked, set);
3087 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3089 * @uset: stores pending signals
3090 * @sigsetsize: size of sigset_t type or larger
3092 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3096 if (sigsetsize > sizeof(*uset))
3099 do_sigpending(&set);
3101 if (copy_to_user(uset, &set, sigsetsize))
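/*
 * Usage sketch (not part of this file): the sigpending() wrapper is backed
 * by this syscall. A minimal sketch, assuming SIGUSR1 was blocked and then
 * raised earlier in the program.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void report_pending(void)
 *	{
 *		sigset_t pending;
 *
 *		if (sigpending(&pending) == 0 &&
 *		    sigismember(&pending, SIGUSR1))
 *			printf("SIGUSR1 is pending\n");
 *	}
 */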
3107 #ifdef CONFIG_COMPAT
3108 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3109 compat_size_t, sigsetsize)
3113 if (sigsetsize > sizeof(*uset))
3116 do_sigpending(&set);
3118 return put_compat_sigset(uset, &set, sigsetsize);
3122 static const struct {
3123 unsigned char limit, layout;
3125 [SIGILL] = { NSIGILL, SIL_FAULT },
3126 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3127 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3128 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3129 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3131 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3133 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3134 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3135 [SIGSYS] = { NSIGSYS, SIL_SYS },
3138 static bool known_siginfo_layout(unsigned sig, int si_code)
3140 if (si_code == SI_KERNEL)
3142 else if ((si_code > SI_USER)) {
3143 if (sig_specific_sicodes(sig)) {
3144 if (si_code <= sig_sicodes[sig].limit)
3147 else if (si_code <= NSIGPOLL)
3150 else if (si_code >= SI_DETHREAD)
3152 else if (si_code == SI_ASYNCNL)
3157 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3159 enum siginfo_layout layout = SIL_KILL;
3160 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3161 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3162 (si_code <= sig_sicodes[sig].limit)) {
3163 layout = sig_sicodes[sig].layout;
3164 /* Handle the exceptions */
3165 if ((sig == SIGBUS) &&
3166 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3167 layout = SIL_FAULT_MCEERR;
3168 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3169 layout = SIL_FAULT_BNDERR;
3171 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3172 layout = SIL_FAULT_PKUERR;
3175 else if (si_code <= NSIGPOLL)
3178 if (si_code == SI_TIMER)
3180 else if (si_code == SI_SIGIO)
3182 else if (si_code < 0)
3188 static inline char __user *si_expansion(const siginfo_t __user *info)
3190 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3193 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3195 char __user *expansion = si_expansion(to);
3196 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3198 if (clear_user(expansion, SI_EXPANSION_SIZE))
3203 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3204 const siginfo_t __user *from)
3206 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3207 char __user *expansion = si_expansion(from);
3208 char buf[SI_EXPANSION_SIZE];
3211 * An unknown si_code might need more than
3212 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3213 * extra bytes are 0. This guarantees copy_siginfo_to_user
3214 * will return this data to userspace exactly.
3216 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3218 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3226 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3227 const siginfo_t __user *from)
3229 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3231 to->si_signo = signo;
3232 return post_copy_siginfo_from_user(to, from);
3235 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3237 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3239 return post_copy_siginfo_from_user(to, from);
3242 #ifdef CONFIG_COMPAT
3243 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3244 const struct kernel_siginfo *from)
3245 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3247 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3249 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3250 const struct kernel_siginfo *from, bool x32_ABI)
3253 struct compat_siginfo new;
3254 memset(&new, 0, sizeof(new));
3256 new.si_signo = from->si_signo;
3257 new.si_errno = from->si_errno;
3258 new.si_code = from->si_code;
3259 switch(siginfo_layout(from->si_signo, from->si_code)) {
3261 new.si_pid = from->si_pid;
3262 new.si_uid = from->si_uid;
3265 new.si_tid = from->si_tid;
3266 new.si_overrun = from->si_overrun;
3267 new.si_int = from->si_int;
3270 new.si_band = from->si_band;
3271 new.si_fd = from->si_fd;
3274 new.si_addr = ptr_to_compat(from->si_addr);
3275 #ifdef __ARCH_SI_TRAPNO
3276 new.si_trapno = from->si_trapno;
3279 case SIL_FAULT_MCEERR:
3280 new.si_addr = ptr_to_compat(from->si_addr);
3281 #ifdef __ARCH_SI_TRAPNO
3282 new.si_trapno = from->si_trapno;
3284 new.si_addr_lsb = from->si_addr_lsb;
3286 case SIL_FAULT_BNDERR:
3287 new.si_addr = ptr_to_compat(from->si_addr);
3288 #ifdef __ARCH_SI_TRAPNO
3289 new.si_trapno = from->si_trapno;
3291 new.si_lower = ptr_to_compat(from->si_lower);
3292 new.si_upper = ptr_to_compat(from->si_upper);
3294 case SIL_FAULT_PKUERR:
3295 new.si_addr = ptr_to_compat(from->si_addr);
3296 #ifdef __ARCH_SI_TRAPNO
3297 new.si_trapno = from->si_trapno;
3299 new.si_pkey = from->si_pkey;
3302 new.si_pid = from->si_pid;
3303 new.si_uid = from->si_uid;
3304 new.si_status = from->si_status;
3305 #ifdef CONFIG_X86_X32_ABI
3307 new._sifields._sigchld_x32._utime = from->si_utime;
3308 new._sifields._sigchld_x32._stime = from->si_stime;
3312 new.si_utime = from->si_utime;
3313 new.si_stime = from->si_stime;
3317 new.si_pid = from->si_pid;
3318 new.si_uid = from->si_uid;
3319 new.si_int = from->si_int;
3322 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3323 new.si_syscall = from->si_syscall;
3324 new.si_arch = from->si_arch;
3328 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3334 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3335 const struct compat_siginfo *from)
3338 to->si_signo = from->si_signo;
3339 to->si_errno = from->si_errno;
3340 to->si_code = from->si_code;
3341 switch(siginfo_layout(from->si_signo, from->si_code)) {
3343 to->si_pid = from->si_pid;
3344 to->si_uid = from->si_uid;
3347 to->si_tid = from->si_tid;
3348 to->si_overrun = from->si_overrun;
3349 to->si_int = from->si_int;
3352 to->si_band = from->si_band;
3353 to->si_fd = from->si_fd;
3356 to->si_addr = compat_ptr(from->si_addr);
3357 #ifdef __ARCH_SI_TRAPNO
3358 to->si_trapno = from->si_trapno;
3361 case SIL_FAULT_MCEERR:
3362 to->si_addr = compat_ptr(from->si_addr);
3363 #ifdef __ARCH_SI_TRAPNO
3364 to->si_trapno = from->si_trapno;
3366 to->si_addr_lsb = from->si_addr_lsb;
3368 case SIL_FAULT_BNDERR:
3369 to->si_addr = compat_ptr(from->si_addr);
3370 #ifdef __ARCH_SI_TRAPNO
3371 to->si_trapno = from->si_trapno;
3373 to->si_lower = compat_ptr(from->si_lower);
3374 to->si_upper = compat_ptr(from->si_upper);
3376 case SIL_FAULT_PKUERR:
3377 to->si_addr = compat_ptr(from->si_addr);
3378 #ifdef __ARCH_SI_TRAPNO
3379 to->si_trapno = from->si_trapno;
3381 to->si_pkey = from->si_pkey;
3384 to->si_pid = from->si_pid;
3385 to->si_uid = from->si_uid;
3386 to->si_status = from->si_status;
3387 #ifdef CONFIG_X86_X32_ABI
3388 if (in_x32_syscall()) {
3389 to->si_utime = from->_sifields._sigchld_x32._utime;
3390 to->si_stime = from->_sifields._sigchld_x32._stime;
3394 to->si_utime = from->si_utime;
3395 to->si_stime = from->si_stime;
3399 to->si_pid = from->si_pid;
3400 to->si_uid = from->si_uid;
3401 to->si_int = from->si_int;
3404 to->si_call_addr = compat_ptr(from->si_call_addr);
3405 to->si_syscall = from->si_syscall;
3406 to->si_arch = from->si_arch;
3412 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3413 const struct compat_siginfo __user *ufrom)
3415 struct compat_siginfo from;
3417 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3420 from.si_signo = signo;
3421 return post_copy_siginfo_from_user32(to, &from);
3424 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3425 const struct compat_siginfo __user *ufrom)
3427 struct compat_siginfo from;
3429 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3432 return post_copy_siginfo_from_user32(to, &from);
3434 #endif /* CONFIG_COMPAT */
3437 * do_sigtimedwait - wait for queued signals specified in @which
3438 * @which: queued signals to wait for
3439 * @info: if non-null, the signal's siginfo is returned here
3440 * @ts: upper bound on process time suspension
3442 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3443 const struct timespec64 *ts)
3445 ktime_t *to = NULL, timeout = KTIME_MAX;
3446 struct task_struct *tsk = current;
3447 sigset_t mask = *which;
3451 if (!timespec64_valid(ts))
3453 timeout = timespec64_to_ktime(*ts);
3458 * Invert the set of allowed signals to get those we want to block.
3460 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3463 spin_lock_irq(&tsk->sighand->siglock);
3464 sig = dequeue_signal(tsk, &mask, info);
3465 if (!sig && timeout) {
3467 * None ready, temporarily unblock those we're interested in
3468 * while we are sleeping, so that we'll be awakened when
3469 * they arrive. Unblocking is always fine, we can avoid
3470 * set_current_blocked().
3472 tsk->real_blocked = tsk->blocked;
3473 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3474 recalc_sigpending();
3475 spin_unlock_irq(&tsk->sighand->siglock);
3477 __set_current_state(TASK_INTERRUPTIBLE);
3478 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3480 spin_lock_irq(&tsk->sighand->siglock);
3481 __set_task_blocked(tsk, &tsk->real_blocked);
3482 sigemptyset(&tsk->real_blocked);
3483 sig = dequeue_signal(tsk, &mask, info);
3485 spin_unlock_irq(&tsk->sighand->siglock);
3489 return ret ? -EINTR : -EAGAIN;
3493 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3495 * @uthese: queued signals to wait for
3496 * @uinfo: if non-null, the signal's siginfo is returned here
3497 * @uts: upper bound on process time suspension
3498 * @sigsetsize: size of sigset_t type
3500 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3501 siginfo_t __user *, uinfo,
3502 const struct __kernel_timespec __user *, uts,
3506 struct timespec64 ts;
3507 kernel_siginfo_t info;
3510 /* XXX: Don't preclude handling different sized sigset_t's. */
3511 if (sigsetsize != sizeof(sigset_t))
3514 if (copy_from_user(&these, uthese, sizeof(these)))
3518 if (get_timespec64(&ts, uts))
3522 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3524 if (ret > 0 && uinfo) {
3525 if (copy_siginfo_to_user(uinfo, &info))
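/*
 * Usage sketch (not part of this file): userspace typically reaches this
 * syscall family through the sigtimedwait() wrapper. A minimal sketch,
 * assuming the POSIX <signal.h>/<time.h> API; the signal must be blocked
 * first so it stays queued instead of being delivered to a handler.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *	#include <stdio.h>
 *
 *	static void wait_for_child_signal(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec timeout = { .tv_sec = 5 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGCHLD);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &info, &timeout) == SIGCHLD)
 *			printf("child %d changed state\n", (int)info.si_pid);
 *	}
 */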
3532 #ifdef CONFIG_COMPAT_32BIT_TIME
3533 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3534 siginfo_t __user *, uinfo,
3535 const struct old_timespec32 __user *, uts,
3539 struct timespec64 ts;
3540 kernel_siginfo_t info;
3543 if (sigsetsize != sizeof(sigset_t))
3546 if (copy_from_user(&these, uthese, sizeof(these)))
3550 if (get_old_timespec32(&ts, uts))
3554 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3556 if (ret > 0 && uinfo) {
3557 if (copy_siginfo_to_user(uinfo, &info))
3565 #ifdef CONFIG_COMPAT
3566 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3567 struct compat_siginfo __user *, uinfo,
3568 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3571 struct timespec64 t;
3572 kernel_siginfo_t info;
3575 if (sigsetsize != sizeof(sigset_t))
3578 if (get_compat_sigset(&s, uthese))
3582 if (get_timespec64(&t, uts))
3586 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3588 if (ret > 0 && uinfo) {
3589 if (copy_siginfo_to_user32(uinfo, &info))
3596 #ifdef CONFIG_COMPAT_32BIT_TIME
3597 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3598 struct compat_siginfo __user *, uinfo,
3599 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3602 struct timespec64 t;
3603 kernel_siginfo_t info;
3606 if (sigsetsize != sizeof(sigset_t))
3609 if (get_compat_sigset(&s, uthese))
3613 if (get_old_timespec32(&t, uts))
3617 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3619 if (ret > 0 && uinfo) {
3620 if (copy_siginfo_to_user32(uinfo, &info))
3629 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3631 clear_siginfo(info);
3632 info->si_signo = sig;
3634 info->si_code = SI_USER;
3635 info->si_pid = task_tgid_vnr(current);
3636 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3640 * sys_kill - send a signal to a process
3641 * @pid: the PID of the process
3642 * @sig: signal to be sent
3644 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3646 struct kernel_siginfo info;
3648 prepare_kill_siginfo(sig, &info);
3650 return kill_something_info(sig, &info, pid);
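/*
 * Usage sketch (not part of this file): kill() maps directly onto this
 * syscall. Signal 0 performs only the permission and existence checks, so
 * it works as a cheap liveness probe. A minimal sketch; the helper name is
 * illustrative.
 *
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	static int process_alive(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;		// exists and we may signal it
 *		return errno == EPERM;		// exists, but we lack permission
 *	}
 */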
3654 * Verify that the signaler and signalee either are in the same pid namespace
3655 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3658 static bool access_pidfd_pidns(struct pid *pid)
3660 struct pid_namespace *active = task_active_pid_ns(current);
3661 struct pid_namespace *p = ns_of_pid(pid);
3674 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3676 #ifdef CONFIG_COMPAT
3678 * Avoid hooking up compat syscalls and instead handle necessary
3679 * conversions here. Note, this is a stop-gap measure and should not be
3680 * considered a generic solution.
3682 if (in_compat_syscall())
3683 return copy_siginfo_from_user32(
3684 kinfo, (struct compat_siginfo __user *)info);
3686 return copy_siginfo_from_user(kinfo, info);
3689 static struct pid *pidfd_to_pid(const struct file *file)
3691 if (file->f_op == &pidfd_fops)
3692 return file->private_data;
3694 return tgid_pidfd_to_pid(file);
3698 * sys_pidfd_send_signal - send a signal to a process through a task file descriptor
3700 * @pidfd: the file descriptor of the process
3701 * @sig: signal to be sent
3702 * @info: the signal info
3703 * @flags: future flags to be passed
3705 * The syscall currently only signals via PIDTYPE_PID which covers
3706 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3708 * In order to extend the syscall to threads and process groups the @flags
3709 * argument should be used. In essence, the @flags argument will determine
3710 * what is signaled and not the file descriptor itself. Put in other words,
3711 * grouping is a property of the flags argument, not a property of the file descriptor.
3714 * Return: 0 on success, negative errno on failure
3716 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3717 siginfo_t __user *, info, unsigned int, flags)
3722 kernel_siginfo_t kinfo;
3724 /* Enforce flags be set to 0 until we add an extension. */
3732 /* Is this a pidfd? */
3733 pid = pidfd_to_pid(f.file);
3740 if (!access_pidfd_pidns(pid))
3744 ret = copy_siginfo_from_user_any(&kinfo, info);
3749 if (unlikely(sig != kinfo.si_signo))
3752 /* Only allow sending arbitrary signals to yourself. */
3754 if ((task_pid(current) != pid) &&
3755 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3758 prepare_kill_siginfo(sig, &kinfo);
3761 ret = kill_pid_info(sig, &kinfo, pid);
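/*
 * Usage sketch (not part of this file): there is no libc wrapper here, so
 * callers go through syscall(2). A minimal sketch, assuming @pidfd was
 * obtained earlier (e.g. by opening a /proc/<pid> directory or via
 * CLONE_PIDFD) and that SYS_pidfd_send_signal is defined by the toolchain
 * headers; the helper name is illustrative.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static int pidfd_kill(int pidfd, int sig)
 *	{
 *		// NULL siginfo: the kernel fills in a kill()-style SI_USER record
 *		if (syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0) == -1) {
 *			perror("pidfd_send_signal");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */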
3769 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3771 struct task_struct *p;
3775 p = find_task_by_vpid(pid);
3776 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3777 error = check_kill_permission(sig, info, p);
3779 * The null signal is a permissions and process existence
3780 * probe. No signal is actually delivered.
3782 if (!error && sig) {
3783 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3785 * If lock_task_sighand() failed we pretend the task
3786 * dies after receiving the signal. The window is tiny,
3787 * and the signal is private anyway.
3789 if (unlikely(error == -ESRCH))
3798 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3800 struct kernel_siginfo info;
3802 clear_siginfo(&info);
3803 info.si_signo = sig;
3805 info.si_code = SI_TKILL;
3806 info.si_pid = task_tgid_vnr(current);
3807 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3809 return do_send_specific(tgid, pid, sig, &info);
3813 * sys_tgkill - send signal to one specific thread
3814 * @tgid: the thread group ID of the thread
3815 * @pid: the PID of the thread
3816 * @sig: signal to be sent
3818 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3819 * exists but no longer belongs to the target process. This
3820 * method solves the problem of threads exiting and PIDs getting reused.
3822 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3824 /* This is only valid for single tasks */
3825 if (pid <= 0 || tgid <= 0)
3828 return do_tkill(tgid, pid, sig);
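/*
 * Usage sketch (not part of this file): signalling one specific thread of a
 * process. A minimal sketch using raw syscall(2) so it does not depend on a
 * libc tgkill() wrapper; @tgid and @tid would typically come from getpid()
 * and gettid() or from a thread registry kept by the program.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	static int signal_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */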
3832 * sys_tkill - send signal to one specific task
3833 * @pid: the PID of the task
3834 * @sig: signal to be sent
3836 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3838 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3840 /* This is only valid for single tasks */
3844 return do_tkill(0, pid, sig);
3847 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3849 /* Not even root can pretend to send signals from the kernel.
3850 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3852 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3853 (task_pid_vnr(current) != pid))
3856 /* POSIX.1b doesn't mention process groups. */
3857 return kill_proc_info(sig, info, pid);
3861 * sys_rt_sigqueueinfo - send signal information to a process
3862 * @pid: the PID of the process
3863 * @sig: signal to be sent
3864 * @uinfo: signal info to be sent
3866 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3867 siginfo_t __user *, uinfo)
3869 kernel_siginfo_t info;
3870 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3873 return do_rt_sigqueueinfo(pid, sig, &info);
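/*
 * Usage sketch (not part of this file): glibc's sigqueue() is built on this
 * syscall and uses si_code = SI_QUEUE (a negative value), which is why it
 * passes the "no impersonation" check above. A minimal sketch; the helper
 * name is illustrative.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static int queue_value(pid_t pid, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		if (sigqueue(pid, SIGUSR1, sv) == -1) {
 *			perror("sigqueue");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */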
3876 #ifdef CONFIG_COMPAT
3877 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3880 struct compat_siginfo __user *, uinfo)
3882 kernel_siginfo_t info;
3883 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3886 return do_rt_sigqueueinfo(pid, sig, &info);
3890 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3892 /* This is only valid for single tasks */
3893 if (pid <= 0 || tgid <= 0)
3896 /* Not even root can pretend to send signals from the kernel.
3897 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3899 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3900 (task_pid_vnr(current) != pid))
3903 return do_send_specific(tgid, pid, sig, info);
3906 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3907 siginfo_t __user *, uinfo)
3909 kernel_siginfo_t info;
3910 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3913 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3916 #ifdef CONFIG_COMPAT
3917 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3921 struct compat_siginfo __user *, uinfo)
3923 kernel_siginfo_t info;
3924 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3927 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3932 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3934 void kernel_sigaction(int sig, __sighandler_t action)
3936 spin_lock_irq(&current->sighand->siglock);
3937 current->sighand->action[sig - 1].sa.sa_handler = action;
3938 if (action == SIG_IGN) {
3942 sigaddset(&mask, sig);
3944 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3945 flush_sigqueue_mask(&mask, &current->pending);
3946 recalc_sigpending();
3948 spin_unlock_irq(&current->sighand->siglock);
3950 EXPORT_SYMBOL(kernel_sigaction);
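/*
 * Usage sketch (not part of this file): kthreads normally reach
 * kernel_sigaction() through the allow_signal()/disallow_signal() helpers in
 * <linux/signal.h>. A minimal sketch of a kthread main loop that opts in to
 * SIGTERM, assuming <linux/kthread.h> and <linux/sched/signal.h>; the
 * function name is illustrative.
 *
 *	static int example_kthread(void *data)
 *	{
 *		allow_signal(SIGTERM);
 *
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current)) {
 *				flush_signals(current);
 *				break;
 *			}
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */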
3952 void __weak sigaction_compat_abi(struct k_sigaction *act,
3953 struct k_sigaction *oact)
3957 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3959 struct task_struct *p = current, *t;
3960 struct k_sigaction *k;
3963 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3966 k = &p->sighand->action[sig-1];
3968 spin_lock_irq(&p->sighand->siglock);
3972 sigaction_compat_abi(act, oact);
3975 sigdelsetmask(&act->sa.sa_mask,
3976 sigmask(SIGKILL) | sigmask(SIGSTOP));
3980 * "Setting a signal action to SIG_IGN for a signal that is
3981 * pending shall cause the pending signal to be discarded,
3982 * whether or not it is blocked."
3984 * "Setting a signal action to SIG_DFL for a signal that is
3985 * pending and whose default action is to ignore the signal
3986 * (for example, SIGCHLD), shall cause the pending signal to
3987 * be discarded, whether or not it is blocked"
3989 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3991 sigaddset(&mask, sig);
3992 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3993 for_each_thread(p, t)
3994 flush_sigqueue_mask(&mask, &t->pending);
3998 spin_unlock_irq(&p->sighand->siglock);
4003 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4006 struct task_struct *t = current;
4009 memset(oss, 0, sizeof(stack_t));
4010 oss->ss_sp = (void __user *) t->sas_ss_sp;
4011 oss->ss_size = t->sas_ss_size;
4012 oss->ss_flags = sas_ss_flags(sp) |
4013 (current->sas_ss_flags & SS_FLAG_BITS);
4017 void __user *ss_sp = ss->ss_sp;
4018 size_t ss_size = ss->ss_size;
4019 unsigned ss_flags = ss->ss_flags;
4022 if (unlikely(on_sig_stack(sp)))
4025 ss_mode = ss_flags & ~SS_FLAG_BITS;
4026 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4030 if (ss_mode == SS_DISABLE) {
4034 if (unlikely(ss_size < min_ss_size))
4038 t->sas_ss_sp = (unsigned long) ss_sp;
4039 t->sas_ss_size = ss_size;
4040 t->sas_ss_flags = ss_flags;
4045 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4049 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4051 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4052 current_user_stack_pointer(),
4054 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
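/*
 * Usage sketch (not part of this file): installing an alternate stack so a
 * SIGSEGV handler can still run when the normal stack is exhausted. A
 * minimal sketch, assuming the POSIX sigaltstack()/sigaction() wrappers;
 * error handling is omitted for brevity.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void install_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */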
4059 int restore_altstack(const stack_t __user *uss)
4062 if (copy_from_user(&new, uss, sizeof(stack_t)))
4064 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4066 /* squash all but EFAULT for now */
4070 int __save_altstack(stack_t __user *uss, unsigned long sp)
4072 struct task_struct *t = current;
4073 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4074 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4075 __put_user(t->sas_ss_size, &uss->ss_size);
4078 if (t->sas_ss_flags & SS_AUTODISARM)
4083 #ifdef CONFIG_COMPAT
4084 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4085 compat_stack_t __user *uoss_ptr)
4091 compat_stack_t uss32;
4092 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4094 uss.ss_sp = compat_ptr(uss32.ss_sp);
4095 uss.ss_flags = uss32.ss_flags;
4096 uss.ss_size = uss32.ss_size;
4098 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4099 compat_user_stack_pointer(),
4100 COMPAT_MINSIGSTKSZ);
4101 if (ret >= 0 && uoss_ptr) {
4103 memset(&old, 0, sizeof(old));
4104 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4105 old.ss_flags = uoss.ss_flags;
4106 old.ss_size = uoss.ss_size;
4107 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4113 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4114 const compat_stack_t __user *, uss_ptr,
4115 compat_stack_t __user *, uoss_ptr)
4117 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4120 int compat_restore_altstack(const compat_stack_t __user *uss)
4122 int err = do_compat_sigaltstack(uss, NULL);
4123 /* squash all but -EFAULT for now */
4124 return err == -EFAULT ? err : 0;
4127 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4130 struct task_struct *t = current;
4131 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4133 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4134 __put_user(t->sas_ss_size, &uss->ss_size);
4137 if (t->sas_ss_flags & SS_AUTODISARM)
4143 #ifdef __ARCH_WANT_SYS_SIGPENDING
4146 * sys_sigpending - examine pending signals
4147 * @uset: where the mask of pending signals is returned
4149 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4153 if (sizeof(old_sigset_t) > sizeof(*uset))
4156 do_sigpending(&set);
4158 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4164 #ifdef CONFIG_COMPAT
4165 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4169 do_sigpending(&set);
4171 return put_user(set.sig[0], set32);
4177 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4179 * sys_sigprocmask - examine and change blocked signals
4180 * @how: whether to add, remove, or set signals
4181 * @nset: signals to add or remove (if non-null)
4182 * @oset: previous value of signal mask if non-null
4184 * Some platforms have their own version with special arguments;
4185 * others support only sys_rt_sigprocmask.
4188 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4189 old_sigset_t __user *, oset)
4191 old_sigset_t old_set, new_set;
4192 sigset_t new_blocked;
4194 old_set = current->blocked.sig[0];
4197 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4200 new_blocked = current->blocked;
4204 sigaddsetmask(&new_blocked, new_set);
4207 sigdelsetmask(&new_blocked, new_set);
4210 new_blocked.sig[0] = new_set;
4216 set_current_blocked(&new_blocked);
4220 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4226 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4228 #ifndef CONFIG_ODD_RT_SIGACTION
4230 * sys_rt_sigaction - alter an action taken by a process
4231 * @sig: signal whose action is to be changed
4232 * @act: new sigaction
4233 * @oact: used to save the previous sigaction
4234 * @sigsetsize: size of sigset_t type
4236 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4237 const struct sigaction __user *, act,
4238 struct sigaction __user *, oact,
4241 struct k_sigaction new_sa, old_sa;
4244 /* XXX: Don't preclude handling different sized sigset_t's. */
4245 if (sigsetsize != sizeof(sigset_t))
4248 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4251 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4255 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
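/*
 * Usage sketch (not part of this file): the sigaction() wrapper is the
 * userspace entry point for this syscall. A minimal sketch installing an
 * SA_SIGINFO handler for SIGUSR1 so the handler can inspect the sender's
 * pid; printf() in a handler is not async-signal-safe and is used here only
 * for brevity.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void usr1_handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		printf("SIGUSR1 from pid %d\n", (int)info->si_pid);
 *	}
 *
 *	static void install_usr1(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = usr1_handler,
 *			.sa_flags = SA_SIGINFO,
 *		};
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *	}
 */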
4260 #ifdef CONFIG_COMPAT
4261 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4262 const struct compat_sigaction __user *, act,
4263 struct compat_sigaction __user *, oact,
4264 compat_size_t, sigsetsize)
4266 struct k_sigaction new_ka, old_ka;
4267 #ifdef __ARCH_HAS_SA_RESTORER
4268 compat_uptr_t restorer;
4272 /* XXX: Don't preclude handling different sized sigset_t's. */
4273 if (sigsetsize != sizeof(compat_sigset_t))
4277 compat_uptr_t handler;
4278 ret = get_user(handler, &act->sa_handler);
4279 new_ka.sa.sa_handler = compat_ptr(handler);
4280 #ifdef __ARCH_HAS_SA_RESTORER
4281 ret |= get_user(restorer, &act->sa_restorer);
4282 new_ka.sa.sa_restorer = compat_ptr(restorer);
4284 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4285 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4290 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4292 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4294 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4295 sizeof(oact->sa_mask));
4296 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4297 #ifdef __ARCH_HAS_SA_RESTORER
4298 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4299 &oact->sa_restorer);
4305 #endif /* !CONFIG_ODD_RT_SIGACTION */
4307 #ifdef CONFIG_OLD_SIGACTION
4308 SYSCALL_DEFINE3(sigaction, int, sig,
4309 const struct old_sigaction __user *, act,
4310 struct old_sigaction __user *, oact)
4312 struct k_sigaction new_ka, old_ka;
4317 if (!access_ok(act, sizeof(*act)) ||
4318 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4319 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4320 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4321 __get_user(mask, &act->sa_mask))
4323 #ifdef __ARCH_HAS_KA_RESTORER
4324 new_ka.ka_restorer = NULL;
4326 siginitset(&new_ka.sa.sa_mask, mask);
4329 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4332 if (!access_ok(oact, sizeof(*oact)) ||
4333 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4334 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4335 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4336 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4343 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4344 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4345 const struct compat_old_sigaction __user *, act,
4346 struct compat_old_sigaction __user *, oact)
4348 struct k_sigaction new_ka, old_ka;
4350 compat_old_sigset_t mask;
4351 compat_uptr_t handler, restorer;
4354 if (!access_ok(act, sizeof(*act)) ||
4355 __get_user(handler, &act->sa_handler) ||
4356 __get_user(restorer, &act->sa_restorer) ||
4357 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4358 __get_user(mask, &act->sa_mask))
4361 #ifdef __ARCH_HAS_KA_RESTORER
4362 new_ka.ka_restorer = NULL;
4364 new_ka.sa.sa_handler = compat_ptr(handler);
4365 new_ka.sa.sa_restorer = compat_ptr(restorer);
4366 siginitset(&new_ka.sa.sa_mask, mask);
4369 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4372 if (!access_ok(oact, sizeof(*oact)) ||
4373 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4374 &oact->sa_handler) ||
4375 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4376 &oact->sa_restorer) ||
4377 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4378 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4385 #ifdef CONFIG_SGETMASK_SYSCALL
4388 * For backwards compatibility. Functionality superseded by sigprocmask.
4390 SYSCALL_DEFINE0(sgetmask)
4393 return current->blocked.sig[0];
4396 SYSCALL_DEFINE1(ssetmask, int, newmask)
4398 int old = current->blocked.sig[0];
4401 siginitset(&newset, newmask);
4402 set_current_blocked(&newset);
4406 #endif /* CONFIG_SGETMASK_SYSCALL */
4408 #ifdef __ARCH_WANT_SYS_SIGNAL
4410 * For backwards compatibility. Functionality superseded by sigaction.
4412 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4414 struct k_sigaction new_sa, old_sa;
4417 new_sa.sa.sa_handler = handler;
4418 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4419 sigemptyset(&new_sa.sa.sa_mask);
4421 ret = do_sigaction(sig, &new_sa, &old_sa);
4423 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4425 #endif /* __ARCH_WANT_SYS_SIGNAL */
4427 #ifdef __ARCH_WANT_SYS_PAUSE
4429 SYSCALL_DEFINE0(pause)
4431 while (!signal_pending(current)) {
4432 __set_current_state(TASK_INTERRUPTIBLE);
4435 return -ERESTARTNOHAND;
4440 static int sigsuspend(sigset_t *set)
4442 current->saved_sigmask = current->blocked;
4443 set_current_blocked(set);
4445 while (!signal_pending(current)) {
4446 __set_current_state(TASK_INTERRUPTIBLE);
4449 set_restore_sigmask();
4450 return -ERESTARTNOHAND;
4454 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4455 * until a signal is received
4456 * @unewset: new signal mask value
4457 * @sigsetsize: size of sigset_t type
4459 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4463 /* XXX: Don't preclude handling different sized sigset_t's. */
4464 if (sigsetsize != sizeof(sigset_t))
4467 if (copy_from_user(&newset, unewset, sizeof(newset)))
4469 return sigsuspend(&newset);
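/*
 * Usage sketch (not part of this file): the classic sigprocmask()/
 * sigsuspend() pattern for waiting on a signal without a wakeup race;
 * sigsuspend() atomically installs the temporary mask and restores the old
 * one on return. A minimal sketch, assuming a SIGUSR1 handler elsewhere sets
 * the illustrative flag "got_usr1".
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// always returns -1 with EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */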
4472 #ifdef CONFIG_COMPAT
4473 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4477 /* XXX: Don't preclude handling different sized sigset_t's. */
4478 if (sigsetsize != sizeof(sigset_t))
4481 if (get_compat_sigset(&newset, unewset))
4483 return sigsuspend(&newset);
4487 #ifdef CONFIG_OLD_SIGSUSPEND
4488 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4491 siginitset(&blocked, mask);
4492 return sigsuspend(&blocked);
4495 #ifdef CONFIG_OLD_SIGSUSPEND3
4496 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4499 siginitset(&blocked, mask);
4500 return sigsuspend(&blocked);
4504 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4509 static inline void siginfo_buildtime_checks(void)
4511 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4513 /* Verify the offsets in the two siginfos match */
4514 #define CHECK_OFFSET(field) \
4515 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4518 CHECK_OFFSET(si_pid);
4519 CHECK_OFFSET(si_uid);
4522 CHECK_OFFSET(si_tid);
4523 CHECK_OFFSET(si_overrun);
4524 CHECK_OFFSET(si_value);
4527 CHECK_OFFSET(si_pid);
4528 CHECK_OFFSET(si_uid);
4529 CHECK_OFFSET(si_value);
4532 CHECK_OFFSET(si_pid);
4533 CHECK_OFFSET(si_uid);
4534 CHECK_OFFSET(si_status);
4535 CHECK_OFFSET(si_utime);
4536 CHECK_OFFSET(si_stime);
4539 CHECK_OFFSET(si_addr);
4540 CHECK_OFFSET(si_addr_lsb);
4541 CHECK_OFFSET(si_lower);
4542 CHECK_OFFSET(si_upper);
4543 CHECK_OFFSET(si_pkey);
4546 CHECK_OFFSET(si_band);
4547 CHECK_OFFSET(si_fd);
4550 CHECK_OFFSET(si_call_addr);
4551 CHECK_OFFSET(si_syscall);
4552 CHECK_OFFSET(si_arch);
4556 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4557 offsetof(struct siginfo, si_addr));
4558 if (sizeof(int) == sizeof(void __user *)) {
4559 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4560 sizeof(void __user *));
4562 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4563 sizeof_field(struct siginfo, si_uid)) !=
4564 sizeof(void __user *));
4565 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4566 offsetof(struct siginfo, si_uid));
4568 #ifdef CONFIG_COMPAT
4569 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4570 offsetof(struct compat_siginfo, si_addr));
4571 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4572 sizeof(compat_uptr_t));
4573 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4574 sizeof_field(struct siginfo, si_pid));
4578 void __init signals_init(void)
4580 siginfo_buildtime_checks();
4582 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4585 #ifdef CONFIG_KGDB_KDB
4586 #include <linux/kdb.h>
4588 * kdb_send_sig - Allows kdb to send signals without exposing
4589 * signal internals. This function checks if the required locks are
4590 * available before calling the main signal code, to avoid kdb deadlocks.
4593 void kdb_send_sig(struct task_struct *t, int sig)
4595 static struct task_struct *kdb_prev_t;
4597 if (!spin_trylock(&t->sighand->siglock)) {
4598 kdb_printf("Can't do kill command now.\n"
4599 "The sigmask lock is held somewhere else in "
4600 "kernel, try again later\n");
4603 new_t = kdb_prev_t != t;
4605 if (t->state != TASK_RUNNING && new_t) {
4606 spin_unlock(&t->sighand->siglock);
4607 kdb_printf("Process is not RUNNING, sending a signal from "
4608 "kdb risks deadlock\n"
4609 "on the run queue locks. "
4610 "The signal has _not_ been sent.\n"
4611 "Reissue the kill command if you want to risk "
4615 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4616 spin_unlock(&t->sighand->siglock);
4618 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4621 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4623 #endif /* CONFIG_KGDB_KDB */