Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1da177e4 LT |
2 | /* |
3 | * linux/kernel/signal.c | |
4 | * | |
5 | * Copyright (C) 1991, 1992 Linus Torvalds | |
6 | * | |
7 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | |
8 | * | |
9 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. | |
10 | * Changes to use preallocated sigqueue structures | |
11 | * to allow signals to be sent reliably. | |
12 | */ | |
13 | ||
1da177e4 | 14 | #include <linux/slab.h> |
9984de1a | 15 | #include <linux/export.h> |
1da177e4 | 16 | #include <linux/init.h> |
589ee628 | 17 | #include <linux/sched/mm.h> |
8703e8a4 | 18 | #include <linux/sched/user.h> |
b17b0153 | 19 | #include <linux/sched/debug.h> |
29930025 | 20 | #include <linux/sched/task.h> |
68db0cf1 | 21 | #include <linux/sched/task_stack.h> |
32ef5517 | 22 | #include <linux/sched/cputime.h> |
3eb39f47 | 23 | #include <linux/file.h> |
1da177e4 | 24 | #include <linux/fs.h> |
3eb39f47 | 25 | #include <linux/proc_fs.h> |
1da177e4 LT |
26 | #include <linux/tty.h> |
27 | #include <linux/binfmts.h> | |
179899fd | 28 | #include <linux/coredump.h> |
1da177e4 LT |
29 | #include <linux/security.h> |
30 | #include <linux/syscalls.h> | |
31 | #include <linux/ptrace.h> | |
7ed20e1a | 32 | #include <linux/signal.h> |
fba2afaa | 33 | #include <linux/signalfd.h> |
f84d49b2 | 34 | #include <linux/ratelimit.h> |
35de254d | 35 | #include <linux/tracehook.h> |
c59ede7b | 36 | #include <linux/capability.h> |
7dfb7103 | 37 | #include <linux/freezer.h> |
84d73786 SB |
38 | #include <linux/pid_namespace.h> |
39 | #include <linux/nsproxy.h> | |
6b550f94 | 40 | #include <linux/user_namespace.h> |
0326f5a9 | 41 | #include <linux/uprobes.h> |
90268439 | 42 | #include <linux/compat.h> |
2b5faa4c | 43 | #include <linux/cn_proc.h> |
52f5684c | 44 | #include <linux/compiler.h> |
31ea70e0 | 45 | #include <linux/posix-timers.h> |
43347d56 | 46 | #include <linux/livepatch.h> |
76f969e8 | 47 | #include <linux/cgroup.h> |
b48345aa | 48 | #include <linux/audit.h> |
52f5684c | 49 | |
d1eb650f MH |
50 | #define CREATE_TRACE_POINTS |
51 | #include <trace/events/signal.h> | |
84d73786 | 52 | |
1da177e4 | 53 | #include <asm/param.h> |
7c0f6ba6 | 54 | #include <linux/uaccess.h> |
1da177e4 LT |
55 | #include <asm/unistd.h> |
56 | #include <asm/siginfo.h> | |
d550bbd4 | 57 | #include <asm/cacheflush.h> |
1da177e4 LT |
58 | |
59 | /* | |
60 | * SLAB caches for signal bits. | |
61 | */ | |
62 | ||
e18b890b | 63 | static struct kmem_cache *sigqueue_cachep; |
1da177e4 | 64 | |
f84d49b2 NO |
65 | int print_fatal_signals __read_mostly; |
66 | ||
35de254d | 67 | static void __user *sig_handler(struct task_struct *t, int sig) |
93585eea | 68 | { |
35de254d RM |
69 | return t->sighand->action[sig - 1].sa.sa_handler; |
70 | } | |
93585eea | 71 | |
e4a8b4ef | 72 | static inline bool sig_handler_ignored(void __user *handler, int sig) |
35de254d | 73 | { |
93585eea | 74 | /* Is it explicitly or implicitly ignored? */ |
93585eea | 75 | return handler == SIG_IGN || |
e4a8b4ef | 76 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
93585eea | 77 | } |
1da177e4 | 78 | |
41aaa481 | 79 | static bool sig_task_ignored(struct task_struct *t, int sig, bool force) |
1da177e4 | 80 | { |
35de254d | 81 | void __user *handler; |
1da177e4 | 82 | |
f008faff ON |
83 | handler = sig_handler(t, sig); |
84 | ||
86989c41 EB |
85 | /* SIGKILL and SIGSTOP may not be sent to the global init */ |
86 | if (unlikely(is_global_init(t) && sig_kernel_only(sig))) | |
87 | return true; | |
88 | ||
f008faff | 89 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && |
ac253850 | 90 | handler == SIG_DFL && !(force && sig_kernel_only(sig))) |
41aaa481 | 91 | return true; |
f008faff | 92 | |
33da8e7c EB |
93 | /* Only allow kernel generated signals to this kthread */ |
94 | if (unlikely((t->flags & PF_KTHREAD) && | |
95 | (handler == SIG_KTHREAD_KERNEL) && !force)) | |
96 | return true; | |
97 | ||
f008faff ON |
98 | return sig_handler_ignored(handler, sig); |
99 | } | |
100 | ||
6a0cdcd7 | 101 | static bool sig_ignored(struct task_struct *t, int sig, bool force) |
f008faff | 102 | { |
1da177e4 LT |
103 | /* |
104 | * Blocked signals are never ignored, since the | |
105 | * signal handler may change by the time it is | |
106 | * unblocked. | |
107 | */ | |
325d22df | 108 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
6a0cdcd7 | 109 | return false; |
1da177e4 | 110 | |
35de254d | 111 | /* |
628c1bcb ON |
112 | * Tracers may want to know about even an ignored signal, unless it
113 | * is SIGKILL, which can't be reported anyway but can be ignored
114 | * by a SIGNAL_UNKILLABLE task.
35de254d | 115 | */ |
628c1bcb | 116 | if (t->ptrace && sig != SIGKILL) |
6a0cdcd7 | 117 | return false; |
628c1bcb ON |
118 | |
119 | return sig_task_ignored(t, sig, force); | |
1da177e4 LT |
120 | } |
121 | ||
122 | /* | |
123 | * Re-calculate pending state from the set of locally pending | |
124 | * signals, globally pending signals, and blocked signals. | |
125 | */ | |
938696a8 | 126 | static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked) |
1da177e4 LT |
127 | { |
128 | unsigned long ready; | |
129 | long i; | |
130 | ||
131 | switch (_NSIG_WORDS) { | |
132 | default: | |
133 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) | |
134 | ready |= signal->sig[i] &~ blocked->sig[i]; | |
135 | break; | |
136 | ||
137 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; | |
138 | ready |= signal->sig[2] &~ blocked->sig[2]; | |
139 | ready |= signal->sig[1] &~ blocked->sig[1]; | |
140 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
141 | break; | |
142 | ||
143 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; | |
144 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
145 | break; | |
146 | ||
147 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; | |
148 | } | |
149 | return ready != 0; | |
150 | } | |
151 | ||
152 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) | |
153 | ||
09ae854e | 154 | static bool recalc_sigpending_tsk(struct task_struct *t) |
1da177e4 | 155 | { |
76f969e8 | 156 | if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) || |
1da177e4 | 157 | PENDING(&t->pending, &t->blocked) || |
76f969e8 RG |
158 | PENDING(&t->signal->shared_pending, &t->blocked) || |
159 | cgroup_task_frozen(t)) { | |
1da177e4 | 160 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
09ae854e | 161 | return true; |
7bb44ade | 162 | } |
09ae854e | 163 | |
b74d0deb RM |
164 | /* |
165 | * We must never clear the flag in another thread, or in current | |
166 | * when it's possible the current syscall is returning -ERESTART*. | |
167 | * So we don't clear it here; only callers that know it is safe should do so.
168 | */ | |
09ae854e | 169 | return false; |
7bb44ade RM |
170 | } |
171 | ||
172 | /* | |
173 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. | |
174 | * This is superfluous when called on current, the wakeup is a harmless no-op. | |
175 | */ | |
176 | void recalc_sigpending_and_wake(struct task_struct *t) | |
177 | { | |
178 | if (recalc_sigpending_tsk(t)) | |
179 | signal_wake_up(t, 0); | |
1da177e4 LT |
180 | } |
181 | ||
182 | void recalc_sigpending(void) | |
183 | { | |
43347d56 MB |
184 | if (!recalc_sigpending_tsk(current) && !freezing(current) && |
185 | !klp_patch_pending(current)) | |
b74d0deb RM |
186 | clear_thread_flag(TIF_SIGPENDING); |
187 | ||
1da177e4 | 188 | } |
fb50f5a4 | 189 | EXPORT_SYMBOL(recalc_sigpending); |
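A minimal, hypothetical sketch (not code from this file) of how recalc_sigpending() is normally used: the blocked mask is changed under the siglock and TIF_SIGPENDING is then recomputed. The `newset` variable is an assumed, already-initialized sigset_t.

```c
/* Hypothetical caller context: newset is a sigset_t prepared elsewhere. */
spin_lock_irq(&current->sighand->siglock);
current->blocked = newset;	/* change which signals are blocked */
recalc_sigpending();		/* recompute TIF_SIGPENDING for current */
spin_unlock_irq(&current->sighand->siglock);
```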
1da177e4 | 190 | |
088fe47c EB |
191 | void calculate_sigpending(void) |
192 | { | |
193 | /* Have any signals or users of TIF_SIGPENDING been delayed | |
194 | * until after fork? | |
195 | */ | |
196 | spin_lock_irq(¤t->sighand->siglock); | |
197 | set_tsk_thread_flag(current, TIF_SIGPENDING); | |
198 | recalc_sigpending(); | |
199 | spin_unlock_irq(¤t->sighand->siglock); | |
200 | } | |
201 | ||
1da177e4 LT |
202 | /* Given the mask, find the first available signal that should be serviced. */ |
203 | ||
a27341cd LT |
204 | #define SYNCHRONOUS_MASK \ |
205 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ | |
a0727e8c | 206 | sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS)) |
a27341cd | 207 | |
fba2afaa | 208 | int next_signal(struct sigpending *pending, sigset_t *mask) |
1da177e4 LT |
209 | { |
210 | unsigned long i, *s, *m, x; | |
211 | int sig = 0; | |
f84d49b2 | 212 | |
1da177e4 LT |
213 | s = pending->signal.sig; |
214 | m = mask->sig; | |
a27341cd LT |
215 | |
216 | /* | |
217 | * Handle the first word specially: it contains the | |
218 | * synchronous signals that need to be dequeued first. | |
219 | */ | |
220 | x = *s &~ *m; | |
221 | if (x) { | |
222 | if (x & SYNCHRONOUS_MASK) | |
223 | x &= SYNCHRONOUS_MASK; | |
224 | sig = ffz(~x) + 1; | |
225 | return sig; | |
226 | } | |
227 | ||
1da177e4 LT |
228 | switch (_NSIG_WORDS) { |
229 | default: | |
a27341cd LT |
230 | for (i = 1; i < _NSIG_WORDS; ++i) { |
231 | x = *++s &~ *++m; | |
232 | if (!x) | |
233 | continue; | |
234 | sig = ffz(~x) + i*_NSIG_BPW + 1; | |
235 | break; | |
236 | } | |
1da177e4 LT |
237 | break; |
238 | ||
a27341cd LT |
239 | case 2: |
240 | x = s[1] &~ m[1]; | |
241 | if (!x) | |
1da177e4 | 242 | break; |
a27341cd | 243 | sig = ffz(~x) + _NSIG_BPW + 1; |
1da177e4 LT |
244 | break; |
245 | ||
a27341cd LT |
246 | case 1: |
247 | /* Nothing to do */ | |
1da177e4 LT |
248 | break; |
249 | } | |
f84d49b2 | 250 | |
1da177e4 LT |
251 | return sig; |
252 | } | |
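To make the first-word special case concrete, here is a hypothetical kernel-context sketch (not part of signal.c); the locals and their setup are assumptions for illustration only.

```c
/*
 * Hypothetical sketch: SIGHUP (1) and SIGSEGV (11) are both pending and
 * nothing is blocked. Because SIGSEGV is in SYNCHRONOUS_MASK, next_signal()
 * reports it before the lower-numbered SIGHUP.
 */
struct sigpending pending;
sigset_t blocked;
int sig;

init_sigpending(&pending);
sigemptyset(&blocked);
sigaddset(&pending.signal, SIGHUP);
sigaddset(&pending.signal, SIGSEGV);

sig = next_signal(&pending, &blocked);	/* yields SIGSEGV, not SIGHUP */
```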
253 | ||
f84d49b2 NO |
254 | static inline void print_dropped_signal(int sig) |
255 | { | |
256 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | |
257 | ||
258 | if (!print_fatal_signals) | |
259 | return; | |
260 | ||
261 | if (!__ratelimit(&ratelimit_state)) | |
262 | return; | |
263 | ||
747800ef | 264 | pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", |
f84d49b2 NO |
265 | current->comm, current->pid, sig); |
266 | } | |
267 | ||
d79fdd6d | 268 | /** |
7dd3db54 | 269 | * task_set_jobctl_pending - set jobctl pending bits |
d79fdd6d | 270 | * @task: target task |
7dd3db54 | 271 | * @mask: pending bits to set |
d79fdd6d | 272 | * |
7dd3db54 TH |
273 | * Set @mask in @task->jobctl. @mask must be a subset of
274 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
276 | * cleared. If @task is already being killed or exiting, this function
277 | * becomes a no-op.
278 | * | |
279 | * CONTEXT: | |
280 | * Must be called with @task->sighand->siglock held. | |
281 | * | |
282 | * RETURNS: | |
283 | * %true if @mask is set, %false if made noop because @task was dying. | |
284 | */ | |
b76808e6 | 285 | bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask) |
7dd3db54 TH |
286 | { |
287 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | | |
288 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); | |
289 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); | |
290 | ||
291 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) | |
292 | return false; | |
293 | ||
294 | if (mask & JOBCTL_STOP_SIGMASK) | |
295 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; | |
296 | ||
297 | task->jobctl |= mask; | |
298 | return true; | |
299 | } | |
300 | ||
d79fdd6d | 301 | /** |
a8f072c1 | 302 | * task_clear_jobctl_trapping - clear jobctl trapping bit |
d79fdd6d TH |
303 | * @task: target task |
304 | * | |
a8f072c1 TH |
305 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. |
306 | * Clear it and wake up the ptracer. Note that we don't need any further | |
307 | * locking. @task->siglock guarantees that @task->parent points to the | |
308 | * ptracer. | |
d79fdd6d TH |
309 | * |
310 | * CONTEXT: | |
311 | * Must be called with @task->sighand->siglock held. | |
312 | */ | |
73ddff2b | 313 | void task_clear_jobctl_trapping(struct task_struct *task) |
d79fdd6d | 314 | { |
a8f072c1 TH |
315 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { |
316 | task->jobctl &= ~JOBCTL_TRAPPING; | |
650226bd | 317 | smp_mb(); /* advised by wake_up_bit() */ |
62c124ff | 318 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); |
d79fdd6d TH |
319 | } |
320 | } | |
321 | ||
e5c1902e | 322 | /** |
3759a0d9 | 323 | * task_clear_jobctl_pending - clear jobctl pending bits |
e5c1902e | 324 | * @task: target task |
3759a0d9 | 325 | * @mask: pending bits to clear |
e5c1902e | 326 | * |
3759a0d9 TH |
327 | * Clear @mask from @task->jobctl. @mask must be a subset of
328 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other | |
329 | * STOP bits are cleared together. | |
e5c1902e | 330 | * |
6dfca329 TH |
331 | * If clearing of @mask leaves no stop or trap pending, this function calls |
332 | * task_clear_jobctl_trapping(). | |
e5c1902e TH |
333 | * |
334 | * CONTEXT: | |
335 | * Must be called with @task->sighand->siglock held. | |
336 | */ | |
b76808e6 | 337 | void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) |
e5c1902e | 338 | { |
3759a0d9 TH |
339 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); |
340 | ||
341 | if (mask & JOBCTL_STOP_PENDING) | |
342 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; | |
343 | ||
344 | task->jobctl &= ~mask; | |
6dfca329 TH |
345 | |
346 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) | |
347 | task_clear_jobctl_trapping(task); | |
e5c1902e TH |
348 | } |
349 | ||
350 | /** | |
351 | * task_participate_group_stop - participate in a group stop | |
352 | * @task: task participating in a group stop | |
353 | * | |
a8f072c1 | 354 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. |
39efa3ef | 355 | * Group stop states are cleared and the group stop count is consumed if |
a8f072c1 | 356 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group |
68d8681e | 357 | * stop, the appropriate `SIGNAL_*` flags are set. |
e5c1902e TH |
358 | * |
359 | * CONTEXT: | |
360 | * Must be called with @task->sighand->siglock held. | |
244056f9 TH |
361 | * |
362 | * RETURNS: | |
363 | * %true if group stop completion should be notified to the parent, %false | |
364 | * otherwise. | |
e5c1902e TH |
365 | */ |
366 | static bool task_participate_group_stop(struct task_struct *task) | |
367 | { | |
368 | struct signal_struct *sig = task->signal; | |
a8f072c1 | 369 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; |
e5c1902e | 370 | |
a8f072c1 | 371 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); |
39efa3ef | 372 | |
3759a0d9 | 373 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); |
e5c1902e TH |
374 | |
375 | if (!consume) | |
376 | return false; | |
377 | ||
378 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | |
379 | sig->group_stop_count--; | |
380 | ||
244056f9 TH |
381 | /* |
382 | * Tell the caller to notify completion iff we are entering into a | |
383 | * fresh group stop. Read comment in do_signal_stop() for details. | |
384 | */ | |
385 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | |
2d39b3cd | 386 | signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); |
e5c1902e TH |
387 | return true; |
388 | } | |
389 | return false; | |
390 | } | |
391 | ||
924de3b8 EB |
392 | void task_join_group_stop(struct task_struct *task) |
393 | { | |
394 | /* Have the new thread join an on-going signal group stop */ | |
395 | unsigned long jobctl = current->jobctl; | |
396 | if (jobctl & JOBCTL_STOP_PENDING) { | |
397 | struct signal_struct *sig = current->signal; | |
398 | unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK; | |
399 | unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; | |
400 | if (task_set_jobctl_pending(task, signr | gstop)) { | |
401 | sig->group_stop_count++; | |
402 | } | |
403 | } | |
404 | } | |
405 | ||
c69e8d9c DH |
406 | /* |
407 | * allocate a new signal queue record | |
408 | * - this may be called without locks if and only if t == current, otherwise an | |
5aba085e | 409 | * appropriate lock must be held to stop the target task from exiting |
c69e8d9c | 410 | */ |
f84d49b2 NO |
411 | static struct sigqueue * |
412 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) | |
1da177e4 LT |
413 | { |
414 | struct sigqueue *q = NULL; | |
10b1fbdb | 415 | struct user_struct *user; |
1da177e4 | 416 | |
10b1fbdb | 417 | /* |
7cf7db8d TG |
418 | * Protect access to @t credentials. This can go away when all |
419 | * callers hold rcu read lock. | |
10b1fbdb | 420 | */ |
7cf7db8d | 421 | rcu_read_lock(); |
d84f4f99 | 422 | user = get_uid(__task_cred(t)->user); |
10b1fbdb | 423 | atomic_inc(&user->sigpending); |
7cf7db8d | 424 | rcu_read_unlock(); |
f84d49b2 | 425 | |
1da177e4 | 426 | if (override_rlimit || |
10b1fbdb | 427 | atomic_read(&user->sigpending) <= |
78d7d407 | 428 | task_rlimit(t, RLIMIT_SIGPENDING)) { |
1da177e4 | 429 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
f84d49b2 NO |
430 | } else { |
431 | print_dropped_signal(sig); | |
432 | } | |
433 | ||
1da177e4 | 434 | if (unlikely(q == NULL)) { |
10b1fbdb | 435 | atomic_dec(&user->sigpending); |
d84f4f99 | 436 | free_uid(user); |
1da177e4 LT |
437 | } else { |
438 | INIT_LIST_HEAD(&q->list); | |
439 | q->flags = 0; | |
d84f4f99 | 440 | q->user = user; |
1da177e4 | 441 | } |
d84f4f99 DH |
442 | |
443 | return q; | |
1da177e4 LT |
444 | } |
445 | ||
514a01b8 | 446 | static void __sigqueue_free(struct sigqueue *q) |
1da177e4 LT |
447 | { |
448 | if (q->flags & SIGQUEUE_PREALLOC) | |
449 | return; | |
450 | atomic_dec(&q->user->sigpending); | |
451 | free_uid(q->user); | |
452 | kmem_cache_free(sigqueue_cachep, q); | |
453 | } | |
454 | ||
6a14c5c9 | 455 | void flush_sigqueue(struct sigpending *queue) |
1da177e4 LT |
456 | { |
457 | struct sigqueue *q; | |
458 | ||
459 | sigemptyset(&queue->signal); | |
460 | while (!list_empty(&queue->list)) { | |
461 | q = list_entry(queue->list.next, struct sigqueue , list); | |
462 | list_del_init(&q->list); | |
463 | __sigqueue_free(q); | |
464 | } | |
465 | } | |
466 | ||
467 | /* | |
9e7c8f8c | 468 | * Flush all pending signals for this kthread. |
1da177e4 | 469 | */ |
c81addc9 | 470 | void flush_signals(struct task_struct *t) |
1da177e4 LT |
471 | { |
472 | unsigned long flags; | |
473 | ||
474 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
9e7c8f8c ON |
475 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
476 | flush_sigqueue(&t->pending); | |
477 | flush_sigqueue(&t->signal->shared_pending); | |
1da177e4 LT |
478 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
479 | } | |
fb50f5a4 | 480 | EXPORT_SYMBOL(flush_signals); |
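As a usage illustration (a sketch under assumptions, not code from this file): a kernel thread that opted in to SIGKILL with allow_signal() and discards anything else with flush_signals(). The thread body, its name, and its polling interval are hypothetical.

```c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* Hypothetical kthread body. */
static int example_kthread(void *data)
{
	allow_signal(SIGKILL);			/* let userspace kill this kthread */

	while (!kthread_should_stop()) {
		if (fatal_signal_pending(current))
			break;			/* SIGKILL: shut down */
		if (signal_pending(current))
			flush_signals(current);	/* drop anything else */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
```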
1da177e4 | 481 | |
baa73d9e | 482 | #ifdef CONFIG_POSIX_TIMERS |
cbaffba1 ON |
483 | static void __flush_itimer_signals(struct sigpending *pending) |
484 | { | |
485 | sigset_t signal, retain; | |
486 | struct sigqueue *q, *n; | |
487 | ||
488 | signal = pending->signal; | |
489 | sigemptyset(&retain); | |
490 | ||
491 | list_for_each_entry_safe(q, n, &pending->list, list) { | |
492 | int sig = q->info.si_signo; | |
493 | ||
494 | if (likely(q->info.si_code != SI_TIMER)) { | |
495 | sigaddset(&retain, sig); | |
496 | } else { | |
497 | sigdelset(&signal, sig); | |
498 | list_del_init(&q->list); | |
499 | __sigqueue_free(q); | |
500 | } | |
501 | } | |
502 | ||
503 | sigorsets(&pending->signal, &signal, &retain); | |
504 | } | |
505 | ||
506 | void flush_itimer_signals(void) | |
507 | { | |
508 | struct task_struct *tsk = current; | |
509 | unsigned long flags; | |
510 | ||
511 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | |
512 | __flush_itimer_signals(&tsk->pending); | |
513 | __flush_itimer_signals(&tsk->signal->shared_pending); | |
514 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | |
515 | } | |
baa73d9e | 516 | #endif |
cbaffba1 | 517 | |
10ab825b ON |
518 | void ignore_signals(struct task_struct *t) |
519 | { | |
520 | int i; | |
521 | ||
522 | for (i = 0; i < _NSIG; ++i) | |
523 | t->sighand->action[i].sa.sa_handler = SIG_IGN; | |
524 | ||
525 | flush_signals(t); | |
526 | } | |
527 | ||
1da177e4 LT |
528 | /* |
529 | * Flush all handlers for a task. | |
530 | */ | |
531 | ||
532 | void | |
533 | flush_signal_handlers(struct task_struct *t, int force_default) | |
534 | { | |
535 | int i; | |
536 | struct k_sigaction *ka = &t->sighand->action[0]; | |
537 | for (i = _NSIG ; i != 0 ; i--) { | |
538 | if (force_default || ka->sa.sa_handler != SIG_IGN) | |
539 | ka->sa.sa_handler = SIG_DFL; | |
540 | ka->sa.sa_flags = 0; | |
522cff14 | 541 | #ifdef __ARCH_HAS_SA_RESTORER |
2ca39528 KC |
542 | ka->sa.sa_restorer = NULL; |
543 | #endif | |
1da177e4 LT |
544 | sigemptyset(&ka->sa.sa_mask); |
545 | ka++; | |
546 | } | |
547 | } | |
548 | ||
67a48a24 | 549 | bool unhandled_signal(struct task_struct *tsk, int sig) |
abd4f750 | 550 | { |
445a91d2 | 551 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; |
b460cbc5 | 552 | if (is_global_init(tsk)) |
67a48a24 CB |
553 | return true; |
554 | ||
445a91d2 | 555 | if (handler != SIG_IGN && handler != SIG_DFL) |
67a48a24 CB |
556 | return false; |
557 | ||
a288eecc TH |
558 | /* if ptraced, let the tracer determine */ |
559 | return !tsk->ptrace; | |
abd4f750 MAS |
560 | } |
561 | ||
ae7795bc | 562 | static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info, |
57db7e4a | 563 | bool *resched_timer) |
1da177e4 LT |
564 | { |
565 | struct sigqueue *q, *first = NULL; | |
1da177e4 | 566 | |
1da177e4 LT |
567 | /* |
568 | * Collect the siginfo appropriate to this signal. Check if | |
569 | * there is another siginfo for the same signal. | |
570 | */ | |
571 | list_for_each_entry(q, &list->list, list) { | |
572 | if (q->info.si_signo == sig) { | |
d4434207 ON |
573 | if (first) |
574 | goto still_pending; | |
1da177e4 LT |
575 | first = q; |
576 | } | |
577 | } | |
d4434207 ON |
578 | |
579 | sigdelset(&list->signal, sig); | |
580 | ||
1da177e4 | 581 | if (first) { |
d4434207 | 582 | still_pending: |
1da177e4 LT |
583 | list_del_init(&first->list); |
584 | copy_siginfo(info, &first->info); | |
57db7e4a EB |
585 | |
586 | *resched_timer = | |
587 | (first->flags & SIGQUEUE_PREALLOC) && | |
588 | (info->si_code == SI_TIMER) && | |
589 | (info->si_sys_private); | |
590 | ||
1da177e4 | 591 | __sigqueue_free(first); |
1da177e4 | 592 | } else { |
5aba085e RD |
593 | /* |
594 | * Ok, it wasn't in the queue. This must be | |
595 | * a fast-pathed signal or we must have been | |
596 | * out of queue space. So zero out the info. | |
1da177e4 | 597 | */ |
faf1f22b | 598 | clear_siginfo(info); |
1da177e4 LT |
599 | info->si_signo = sig; |
600 | info->si_errno = 0; | |
7486e5d9 | 601 | info->si_code = SI_USER; |
1da177e4 LT |
602 | info->si_pid = 0; |
603 | info->si_uid = 0; | |
604 | } | |
1da177e4 LT |
605 | } |
606 | ||
607 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |
ae7795bc | 608 | kernel_siginfo_t *info, bool *resched_timer) |
1da177e4 | 609 | { |
27d91e07 | 610 | int sig = next_signal(pending, mask); |
1da177e4 | 611 | |
2e01fabe | 612 | if (sig) |
57db7e4a | 613 | collect_signal(sig, pending, info, resched_timer); |
1da177e4 LT |
614 | return sig; |
615 | } | |
616 | ||
617 | /* | |
5aba085e | 618 | * Dequeue a signal and return the element to the caller, which is |
1da177e4 LT |
619 | * expected to free it. |
620 | * | |
621 | * All callers have to hold the siglock. | |
622 | */ | |
ae7795bc | 623 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) |
1da177e4 | 624 | { |
57db7e4a | 625 | bool resched_timer = false; |
c5363d03 | 626 | int signr; |
caec4e8d BH |
627 | |
628 | /* We only dequeue private signals from ourselves, we don't let | |
629 | * signalfd steal them | |
630 | */ | |
57db7e4a | 631 | signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); |
8bfd9a7a | 632 | if (!signr) { |
1da177e4 | 633 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
57db7e4a | 634 | mask, info, &resched_timer); |
baa73d9e | 635 | #ifdef CONFIG_POSIX_TIMERS |
8bfd9a7a TG |
636 | /* |
637 | * itimer signal ? | |
638 | * | |
639 | * itimers are process shared and we restart periodic | |
640 | * itimers in the signal delivery path to prevent DoS | |
641 | * attacks in the high resolution timer case. This is | |
5aba085e | 642 | * compliant with the old way of self-restarting |
8bfd9a7a TG |
643 | * itimers, as the SIGALRM is a legacy signal and only |
644 | * queued once. Changing the restart behaviour to | |
645 | * restart the timer in the signal dequeue path is | |
646 | * reducing the timer noise on heavy loaded !highres | |
647 | * systems too. | |
648 | */ | |
649 | if (unlikely(signr == SIGALRM)) { | |
650 | struct hrtimer *tmr = &tsk->signal->real_timer; | |
651 | ||
652 | if (!hrtimer_is_queued(tmr) && | |
2456e855 | 653 | tsk->signal->it_real_incr != 0) { |
8bfd9a7a TG |
654 | hrtimer_forward(tmr, tmr->base->get_time(), |
655 | tsk->signal->it_real_incr); | |
656 | hrtimer_restart(tmr); | |
657 | } | |
658 | } | |
baa73d9e | 659 | #endif |
8bfd9a7a | 660 | } |
c5363d03 | 661 | |
b8fceee1 | 662 | recalc_sigpending(); |
c5363d03 PE |
663 | if (!signr) |
664 | return 0; | |
665 | ||
666 | if (unlikely(sig_kernel_stop(signr))) { | |
8bfd9a7a TG |
667 | /* |
668 | * Set a marker that we have dequeued a stop signal. Our | |
669 | * caller might release the siglock and then the pending | |
670 | * stop signal it is about to process is no longer in the | |
671 | * pending bitmasks, but must still be cleared by a SIGCONT | |
672 | * (and overruled by a SIGKILL). So those cases clear this | |
673 | * shared flag after we've set it. Note that this flag may | |
674 | * remain set after the signal we return is ignored or | |
675 | * handled. That doesn't matter because its only purpose | |
676 | * is to alert stop-signal processing code when another | |
677 | * processor has come along and cleared the flag. | |
678 | */ | |
a8f072c1 | 679 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
8bfd9a7a | 680 | } |
baa73d9e | 681 | #ifdef CONFIG_POSIX_TIMERS |
57db7e4a | 682 | if (resched_timer) { |
1da177e4 LT |
683 | /* |
684 | * Release the siglock to ensure proper locking order | |
685 | * of timer locks outside of siglocks. Note, we leave | |
686 | * irqs disabled here, since the posix-timers code is | |
687 | * about to disable them again anyway. | |
688 | */ | |
689 | spin_unlock(&tsk->sighand->siglock); | |
96fe3b07 | 690 | posixtimer_rearm(info); |
1da177e4 | 691 | spin_lock(&tsk->sighand->siglock); |
9943d3ac EB |
692 | |
693 | /* Don't expose the si_sys_private value to userspace */ | |
694 | info->si_sys_private = 0; | |
1da177e4 | 695 | } |
baa73d9e | 696 | #endif |
1da177e4 LT |
697 | return signr; |
698 | } | |
fb50f5a4 | 699 | EXPORT_SYMBOL_GPL(dequeue_signal); |
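A small sketch of the calling convention noted above (all callers hold the siglock); it mirrors how get_signal() dequeues one unblocked signal for the current task and is illustrative rather than code from this file.

```c
kernel_siginfo_t info;
int signr;

spin_lock_irq(&current->sighand->siglock);
signr = dequeue_signal(current, &current->blocked, &info);	/* 0 if nothing deliverable */
spin_unlock_irq(&current->sighand->siglock);
```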
1da177e4 | 700 | |
7146db33 EB |
701 | static int dequeue_synchronous_signal(kernel_siginfo_t *info) |
702 | { | |
703 | struct task_struct *tsk = current; | |
704 | struct sigpending *pending = &tsk->pending; | |
705 | struct sigqueue *q, *sync = NULL; | |
706 | ||
707 | /* | |
708 | * Might a synchronous signal be in the queue? | |
709 | */ | |
710 | if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) | |
711 | return 0; | |
712 | ||
713 | /* | |
714 | * Return the first synchronous signal in the queue. | |
715 | */ | |
716 | list_for_each_entry(q, &pending->list, list) { | |
717 | /* Synchronous signals have a positive si_code */
718 | if ((q->info.si_code > SI_USER) && | |
719 | (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { | |
720 | sync = q; | |
721 | goto next; | |
722 | } | |
723 | } | |
724 | return 0; | |
725 | next: | |
726 | /* | |
727 | * Check if there is another siginfo for the same signal. | |
728 | */ | |
729 | list_for_each_entry_continue(q, &pending->list, list) { | |
730 | if (q->info.si_signo == sync->info.si_signo) | |
731 | goto still_pending; | |
732 | } | |
733 | ||
734 | sigdelset(&pending->signal, sync->info.si_signo); | |
735 | recalc_sigpending(); | |
736 | still_pending: | |
737 | list_del_init(&sync->list); | |
738 | copy_siginfo(info, &sync->info); | |
739 | __sigqueue_free(sync); | |
740 | return info->si_signo; | |
741 | } | |
742 | ||
1da177e4 LT |
743 | /* |
744 | * Tell a process that it has a new active signal.
745 | * | |
746 | * NOTE! we rely on the previous spin_lock to | |
747 | * lock interrupts for us! We can only be called with | |
748 | * "siglock" held, and the local interrupt must | |
749 | * have been disabled when that got acquired! | |
750 | * | |
751 | * No need to set need_resched since signal event passing | |
752 | * goes through ->blocked | |
753 | */ | |
910ffdb1 | 754 | void signal_wake_up_state(struct task_struct *t, unsigned int state) |
1da177e4 | 755 | { |
1da177e4 | 756 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
1da177e4 | 757 | /* |
910ffdb1 | 758 | * TASK_WAKEKILL also means wake it up in the stopped/traced/killable |
f021a3c2 | 759 | * case. We don't check t->state here because there is a race with it |
1da177e4 LT |
760 | * executing on another processor and just now entering the stopped state.
761 | * By using wake_up_state, we ensure the process will wake up and | |
762 | * handle its death signal. | |
763 | */ | |
910ffdb1 | 764 | if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) |
1da177e4 LT |
765 | kick_process(t); |
766 | } | |
767 | ||
71fabd5e GA |
768 | /* |
769 | * Remove signals in mask from the pending set and queue; the queued
770 | * entries for those signals are freed.
771 | * | |
772 | * All callers must be holding the siglock. | |
71fabd5e | 773 | */ |
8f11351e | 774 | static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) |
71fabd5e GA |
775 | { |
776 | struct sigqueue *q, *n; | |
777 | sigset_t m; | |
778 | ||
779 | sigandsets(&m, mask, &s->signal); | |
780 | if (sigisemptyset(&m)) | |
8f11351e | 781 | return; |
71fabd5e | 782 | |
702a5073 | 783 | sigandnsets(&s->signal, &s->signal, mask); |
71fabd5e GA |
784 | list_for_each_entry_safe(q, n, &s->list, list) { |
785 | if (sigismember(mask, q->info.si_signo)) { | |
786 | list_del_init(&q->list); | |
787 | __sigqueue_free(q); | |
788 | } | |
789 | } | |
71fabd5e | 790 | } |
1da177e4 | 791 | |
ae7795bc | 792 | static inline int is_si_special(const struct kernel_siginfo *info) |
614c517d | 793 | { |
4ff4c31a | 794 | return info <= SEND_SIG_PRIV; |
614c517d ON |
795 | } |
796 | ||
ae7795bc | 797 | static inline bool si_fromuser(const struct kernel_siginfo *info) |
614c517d ON |
798 | { |
799 | return info == SEND_SIG_NOINFO || | |
800 | (!is_si_special(info) && SI_FROMUSER(info)); | |
801 | } | |
802 | ||
39fd3393 SH |
803 | /* |
804 | * called with RCU read lock from check_kill_permission() | |
805 | */ | |
2a9b9094 | 806 | static bool kill_ok_by_cred(struct task_struct *t) |
39fd3393 SH |
807 | { |
808 | const struct cred *cred = current_cred(); | |
809 | const struct cred *tcred = __task_cred(t); | |
810 | ||
2a9b9094 CB |
811 | return uid_eq(cred->euid, tcred->suid) || |
812 | uid_eq(cred->euid, tcred->uid) || | |
813 | uid_eq(cred->uid, tcred->suid) || | |
814 | uid_eq(cred->uid, tcred->uid) || | |
815 | ns_capable(tcred->user_ns, CAP_KILL); | |
39fd3393 SH |
816 | } |
817 | ||
1da177e4 LT |
818 | /* |
819 | * Bad permissions for sending the signal | |
694f690d | 820 | * - the caller must hold the RCU read lock |
1da177e4 | 821 | */ |
ae7795bc | 822 | static int check_kill_permission(int sig, struct kernel_siginfo *info, |
1da177e4 LT |
823 | struct task_struct *t) |
824 | { | |
2e2ba22e | 825 | struct pid *sid; |
3b5e9e53 ON |
826 | int error; |
827 | ||
7ed20e1a | 828 | if (!valid_signal(sig)) |
3b5e9e53 ON |
829 | return -EINVAL; |
830 | ||
614c517d | 831 | if (!si_fromuser(info)) |
3b5e9e53 | 832 | return 0; |
e54dc243 | 833 | |
3b5e9e53 ON |
834 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ |
835 | if (error) | |
1da177e4 | 836 | return error; |
3b5e9e53 | 837 | |
065add39 | 838 | if (!same_thread_group(current, t) && |
39fd3393 | 839 | !kill_ok_by_cred(t)) { |
2e2ba22e ON |
840 | switch (sig) { |
841 | case SIGCONT: | |
2e2ba22e | 842 | sid = task_session(t); |
2e2ba22e ON |
843 | /* |
844 | * We don't return the error if sid == NULL. The | |
845 | * task was unhashed, the caller must notice this. | |
846 | */ | |
847 | if (!sid || sid == task_session(current)) | |
848 | break; | |
b028fb61 | 849 | /* fall through */ |
2e2ba22e ON |
850 | default: |
851 | return -EPERM; | |
852 | } | |
853 | } | |
c2f0c7c3 | 854 | |
6b4f3d01 | 855 | return security_task_kill(t, info, sig, NULL); |
1da177e4 LT |
856 | } |
857 | ||
fb1d910c TH |
858 | /** |
859 | * ptrace_trap_notify - schedule trap to notify ptracer | |
860 | * @t: tracee wanting to notify tracer | |
861 | * | |
862 | * This function schedules sticky ptrace trap which is cleared on the next | |
863 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by | |
864 | * ptracer. | |
865 | * | |
544b2c91 TH |
866 | * If @t is running, STOP trap will be taken. If trapped for STOP and |
867 | * ptracer is listening for events, tracee is woken up so that it can | |
868 | * re-trap for the new event. If trapped otherwise, STOP trap will be | |
869 | * eventually taken without returning to userland after the existing traps | |
870 | * are finished by PTRACE_CONT. | |
fb1d910c TH |
871 | * |
872 | * CONTEXT: | |
873 | * Must be called with @task->sighand->siglock held. | |
874 | */ | |
875 | static void ptrace_trap_notify(struct task_struct *t) | |
876 | { | |
877 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); | |
878 | assert_spin_locked(&t->sighand->siglock); | |
879 | ||
880 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | |
910ffdb1 | 881 | ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
fb1d910c TH |
882 | } |
883 | ||
1da177e4 | 884 | /* |
7e695a5e ON |
885 | * Handle magic process-wide effects of stop/continue signals. Unlike |
886 | * the signal actions, these happen immediately at signal-generation | |
1da177e4 LT |
887 | * time regardless of blocking, ignoring, or handling. This does the |
888 | * actual continuing for SIGCONT, but not the actual stopping for stop | |
7e695a5e ON |
889 | * signals. The process stop is done as a signal action for SIG_DFL. |
890 | * | |
891 | * Returns true if the signal should be actually delivered, otherwise | |
892 | * it should be dropped. | |
1da177e4 | 893 | */ |
403bad72 | 894 | static bool prepare_signal(int sig, struct task_struct *p, bool force) |
1da177e4 | 895 | { |
ad16a460 | 896 | struct signal_struct *signal = p->signal; |
1da177e4 | 897 | struct task_struct *t; |
9490592f | 898 | sigset_t flush; |
1da177e4 | 899 | |
403bad72 | 900 | if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) { |
5fa534c9 | 901 | if (!(signal->flags & SIGNAL_GROUP_EXIT)) |
403bad72 | 902 | return sig == SIGKILL; |
1da177e4 | 903 | /* |
7e695a5e | 904 | * The process is in the middle of dying, nothing to do. |
1da177e4 | 905 | */ |
7e695a5e | 906 | } else if (sig_kernel_stop(sig)) { |
1da177e4 LT |
907 | /* |
908 | * This is a stop signal. Remove SIGCONT from all queues. | |
909 | */ | |
9490592f | 910 | siginitset(&flush, sigmask(SIGCONT)); |
c09c1441 | 911 | flush_sigqueue_mask(&flush, &signal->shared_pending); |
9490592f | 912 | for_each_thread(p, t) |
c09c1441 | 913 | flush_sigqueue_mask(&flush, &t->pending); |
1da177e4 | 914 | } else if (sig == SIGCONT) { |
fc321d2e | 915 | unsigned int why; |
1da177e4 | 916 | /* |
1deac632 | 917 | * Remove all stop signals from all queues, wake all threads. |
1da177e4 | 918 | */ |
9490592f | 919 | siginitset(&flush, SIG_KERNEL_STOP_MASK); |
c09c1441 | 920 | flush_sigqueue_mask(&flush, &signal->shared_pending); |
9490592f | 921 | for_each_thread(p, t) { |
c09c1441 | 922 | flush_sigqueue_mask(&flush, &t->pending); |
3759a0d9 | 923 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); |
fb1d910c TH |
924 | if (likely(!(t->ptrace & PT_SEIZED))) |
925 | wake_up_state(t, __TASK_STOPPED); | |
926 | else | |
927 | ptrace_trap_notify(t); | |
9490592f | 928 | } |
1da177e4 | 929 | |
fc321d2e ON |
930 | /* |
931 | * Notify the parent with CLD_CONTINUED if we were stopped. | |
932 | * | |
933 | * If we were in the middle of a group stop, we pretend it | |
934 | * was already finished, and then continued. Since SIGCHLD | |
935 | * doesn't queue we report only CLD_STOPPED, as if the next | |
936 | * CLD_CONTINUED was dropped. | |
937 | */ | |
938 | why = 0; | |
ad16a460 | 939 | if (signal->flags & SIGNAL_STOP_STOPPED) |
fc321d2e | 940 | why |= SIGNAL_CLD_CONTINUED; |
ad16a460 | 941 | else if (signal->group_stop_count) |
fc321d2e ON |
942 | why |= SIGNAL_CLD_STOPPED; |
943 | ||
944 | if (why) { | |
021e1ae3 | 945 | /* |
ae6d2ed7 | 946 | * The first thread which returns from do_signal_stop() |
021e1ae3 | 947 | * will take ->siglock, notice SIGNAL_CLD_MASK, and |
2e58f57d | 948 | * notify its parent. See get_signal(). |
021e1ae3 | 949 | */ |
2d39b3cd | 950 | signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); |
ad16a460 ON |
951 | signal->group_stop_count = 0; |
952 | signal->group_exit_code = 0; | |
1da177e4 | 953 | } |
1da177e4 | 954 | } |
7e695a5e | 955 | |
def8cf72 | 956 | return !sig_ignored(p, sig, force); |
1da177e4 LT |
957 | } |
958 | ||
71f11dc0 ON |
959 | /* |
960 | * Test if P wants to take SIG. After we've checked all threads with this, | |
961 | * it's equivalent to finding no threads not blocking SIG. Any threads not | |
962 | * blocking SIG were ruled out because they are not running and already | |
963 | * have pending signals. Such threads will dequeue from the shared queue | |
964 | * as soon as they're available, so putting the signal on the shared queue | |
965 | * will be equivalent to sending it to one such thread. | |
966 | */ | |
acd14e62 | 967 | static inline bool wants_signal(int sig, struct task_struct *p) |
71f11dc0 ON |
968 | { |
969 | if (sigismember(&p->blocked, sig)) | |
acd14e62 CB |
970 | return false; |
971 | ||
71f11dc0 | 972 | if (p->flags & PF_EXITING) |
acd14e62 CB |
973 | return false; |
974 | ||
71f11dc0 | 975 | if (sig == SIGKILL) |
acd14e62 CB |
976 | return true; |
977 | ||
71f11dc0 | 978 | if (task_is_stopped_or_traced(p)) |
acd14e62 CB |
979 | return false; |
980 | ||
71f11dc0 ON |
981 | return task_curr(p) || !signal_pending(p); |
982 | } | |
983 | ||
07296149 | 984 | static void complete_signal(int sig, struct task_struct *p, enum pid_type type) |
71f11dc0 ON |
985 | { |
986 | struct signal_struct *signal = p->signal; | |
987 | struct task_struct *t; | |
988 | ||
989 | /* | |
990 | * Now find a thread we can wake up to take the signal off the queue. | |
991 | * | |
992 | * If the main thread wants the signal, it gets first crack. | |
993 | * Probably the least surprising to the average bear. | |
994 | */ | |
995 | if (wants_signal(sig, p)) | |
996 | t = p; | |
07296149 | 997 | else if ((type == PIDTYPE_PID) || thread_group_empty(p)) |
71f11dc0 ON |
998 | /* |
999 | * There is just one thread and it does not need to be woken. | |
1000 | * It will dequeue unblocked signals before it runs again. | |
1001 | */ | |
1002 | return; | |
1003 | else { | |
1004 | /* | |
1005 | * Otherwise try to find a suitable thread. | |
1006 | */ | |
1007 | t = signal->curr_target; | |
1008 | while (!wants_signal(sig, t)) { | |
1009 | t = next_thread(t); | |
1010 | if (t == signal->curr_target) | |
1011 | /* | |
1012 | * No thread needs to be woken. | |
1013 | * Any eligible threads will see | |
1014 | * the signal in the queue soon. | |
1015 | */ | |
1016 | return; | |
1017 | } | |
1018 | signal->curr_target = t; | |
1019 | } | |
1020 | ||
1021 | /* | |
1022 | * Found a killable thread. If the signal will be fatal, | |
1023 | * then start taking the whole group down immediately. | |
1024 | */ | |
fae5fa44 | 1025 | if (sig_fatal(p, sig) && |
42691579 | 1026 | !(signal->flags & SIGNAL_GROUP_EXIT) && |
71f11dc0 | 1027 | !sigismember(&t->real_blocked, sig) && |
42691579 | 1028 | (sig == SIGKILL || !p->ptrace)) { |
71f11dc0 ON |
1029 | /* |
1030 | * This signal will be fatal to the whole group. | |
1031 | */ | |
1032 | if (!sig_kernel_coredump(sig)) { | |
1033 | /* | |
1034 | * Start a group exit and wake everybody up. | |
1035 | * This way we don't have other threads | |
1036 | * running and doing things after a slower | |
1037 | * thread has the fatal signal pending. | |
1038 | */ | |
1039 | signal->flags = SIGNAL_GROUP_EXIT; | |
1040 | signal->group_exit_code = sig; | |
1041 | signal->group_stop_count = 0; | |
1042 | t = p; | |
1043 | do { | |
6dfca329 | 1044 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
71f11dc0 ON |
1045 | sigaddset(&t->pending.signal, SIGKILL); |
1046 | signal_wake_up(t, 1); | |
1047 | } while_each_thread(p, t); | |
1048 | return; | |
1049 | } | |
1050 | } | |
1051 | ||
1052 | /* | |
1053 | * The signal is already in the shared-pending queue. | |
1054 | * Tell the chosen thread to wake up and dequeue it. | |
1055 | */ | |
1056 | signal_wake_up(t, sig == SIGKILL); | |
1057 | return; | |
1058 | } | |
1059 | ||
a19e2c01 | 1060 | static inline bool legacy_queue(struct sigpending *signals, int sig) |
af7fff9c PE |
1061 | { |
1062 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); | |
1063 | } | |
1064 | ||
ae7795bc | 1065 | static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, |
8ad23dea | 1066 | enum pid_type type, bool force) |
1da177e4 | 1067 | { |
2ca3515a | 1068 | struct sigpending *pending; |
6e65acba | 1069 | struct sigqueue *q; |
7a0aeb14 | 1070 | int override_rlimit; |
6c303d3a | 1071 | int ret = 0, result; |
0a16b607 | 1072 | |
6e65acba | 1073 | assert_spin_locked(&t->sighand->siglock); |
921cf9f6 | 1074 | |
6c303d3a | 1075 | result = TRACE_SIGNAL_IGNORED; |
8ad23dea | 1076 | if (!prepare_signal(sig, t, force)) |
6c303d3a | 1077 | goto ret; |
2ca3515a | 1078 | |
5a883cee | 1079 | pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; |
2acb024d PE |
1080 | /* |
1081 | * Short-circuit ignored signals and support queuing | |
1082 | * exactly one non-rt signal, so that we can get more | |
1083 | * detailed information about the cause of the signal. | |
1084 | */ | |
6c303d3a | 1085 | result = TRACE_SIGNAL_ALREADY_PENDING; |
7e695a5e | 1086 | if (legacy_queue(pending, sig)) |
6c303d3a ON |
1087 | goto ret; |
1088 | ||
1089 | result = TRACE_SIGNAL_DELIVERED; | |
1da177e4 | 1090 | /* |
a692933a | 1091 | * Skip useless siginfo allocation for SIGKILL and kernel threads. |
1da177e4 | 1092 | */ |
a692933a | 1093 | if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) |
1da177e4 LT |
1094 | goto out_set; |
1095 | ||
5aba085e RD |
1096 | /* |
1097 | * Real-time signals must be queued if sent by sigqueue, or | |
1098 | * some other real-time mechanism. It is implementation | |
1099 | * defined whether kill() does so. We attempt to do so, on | |
1100 | * the principle of least surprise, but since kill is not | |
1101 | * allowed to fail with EAGAIN when low on memory we just | |
1102 | * make sure at least one signal gets delivered and don't | |
1103 | * pass on the info struct. | |
1104 | */ | |
7a0aeb14 VN |
1105 | if (sig < SIGRTMIN) |
1106 | override_rlimit = (is_si_special(info) || info->si_code >= 0); | |
1107 | else | |
1108 | override_rlimit = 0; | |
1109 | ||
75f296d9 | 1110 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); |
1da177e4 | 1111 | if (q) { |
2ca3515a | 1112 | list_add_tail(&q->list, &pending->list); |
1da177e4 | 1113 | switch ((unsigned long) info) { |
b67a1b9e | 1114 | case (unsigned long) SEND_SIG_NOINFO: |
faf1f22b | 1115 | clear_siginfo(&q->info); |
1da177e4 LT |
1116 | q->info.si_signo = sig; |
1117 | q->info.si_errno = 0; | |
1118 | q->info.si_code = SI_USER; | |
9cd4fd10 | 1119 | q->info.si_pid = task_tgid_nr_ns(current, |
09bca05c | 1120 | task_active_pid_ns(t)); |
7a0cf094 EB |
1121 | rcu_read_lock(); |
1122 | q->info.si_uid = | |
1123 | from_kuid_munged(task_cred_xxx(t, user_ns), | |
1124 | current_uid()); | |
1125 | rcu_read_unlock(); | |
1da177e4 | 1126 | break; |
b67a1b9e | 1127 | case (unsigned long) SEND_SIG_PRIV: |
faf1f22b | 1128 | clear_siginfo(&q->info); |
1da177e4 LT |
1129 | q->info.si_signo = sig; |
1130 | q->info.si_errno = 0; | |
1131 | q->info.si_code = SI_KERNEL; | |
1132 | q->info.si_pid = 0; | |
1133 | q->info.si_uid = 0; | |
1134 | break; | |
1135 | default: | |
1136 | copy_siginfo(&q->info, info); | |
1137 | break; | |
1138 | } | |
8917bef3 EB |
1139 | } else if (!is_si_special(info) && |
1140 | sig >= SIGRTMIN && info->si_code != SI_USER) { | |
1141 | /* | |
1142 | * Queue overflow, abort. We may abort if the | |
1143 | * signal was rt and sent by user using something | |
1144 | * other than kill(). | |
1145 | */ | |
1146 | result = TRACE_SIGNAL_OVERFLOW_FAIL; | |
1147 | ret = -EAGAIN; | |
1148 | goto ret; | |
1149 | } else { | |
1150 | /* | |
1151 | * This is a silent loss of information. We still | |
1152 | * send the signal, but the *info bits are lost. | |
1153 | */ | |
1154 | result = TRACE_SIGNAL_LOSE_INFO; | |
1da177e4 LT |
1155 | } |
1156 | ||
1157 | out_set: | |
53c30337 | 1158 | signalfd_notify(t, sig); |
2ca3515a | 1159 | sigaddset(&pending->signal, sig); |
c3ad2c3b EB |
1160 | |
1161 | /* Let multiprocess signals appear after on-going forks */ | |
1162 | if (type > PIDTYPE_TGID) { | |
1163 | struct multiprocess_signals *delayed; | |
1164 | hlist_for_each_entry(delayed, &t->signal->multiprocess, node) { | |
1165 | sigset_t *signal = &delayed->signal; | |
1166 | /* Can't queue both a stop and a continue signal */ | |
1167 | if (sig == SIGCONT) | |
1168 | sigdelsetmask(signal, SIG_KERNEL_STOP_MASK); | |
1169 | else if (sig_kernel_stop(sig)) | |
1170 | sigdelset(signal, SIGCONT); | |
1171 | sigaddset(signal, sig); | |
1172 | } | |
1173 | } | |
1174 | ||
07296149 | 1175 | complete_signal(sig, t, type); |
6c303d3a | 1176 | ret: |
5a883cee | 1177 | trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result); |
6c303d3a | 1178 | return ret; |
1da177e4 LT |
1179 | } |
1180 | ||
7a0cf094 EB |
1181 | static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) |
1182 | { | |
1183 | bool ret = false; | |
1184 | switch (siginfo_layout(info->si_signo, info->si_code)) { | |
1185 | case SIL_KILL: | |
1186 | case SIL_CHLD: | |
1187 | case SIL_RT: | |
1188 | ret = true; | |
1189 | break; | |
1190 | case SIL_TIMER: | |
1191 | case SIL_POLL: | |
1192 | case SIL_FAULT: | |
1193 | case SIL_FAULT_MCEERR: | |
1194 | case SIL_FAULT_BNDERR: | |
1195 | case SIL_FAULT_PKUERR: | |
1196 | case SIL_SYS: | |
1197 | ret = false; | |
1198 | break; | |
1199 | } | |
1200 | return ret; | |
1201 | } | |
1202 | ||
ae7795bc | 1203 | static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, |
b213984b | 1204 | enum pid_type type) |
7978b567 | 1205 | { |
8ad23dea EB |
1206 | /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */ |
1207 | bool force = false; | |
921cf9f6 | 1208 | |
8ad23dea EB |
1209 | if (info == SEND_SIG_NOINFO) { |
1210 | /* Force if sent from an ancestor pid namespace */ | |
1211 | force = !task_pid_nr_ns(current, task_active_pid_ns(t)); | |
1212 | } else if (info == SEND_SIG_PRIV) { | |
1213 | /* Don't ignore kernel generated signals */ | |
1214 | force = true; | |
1215 | } else if (has_si_pid_and_uid(info)) { | |
1216 | /* SIGKILL and SIGSTOP are special or have ids */
7a0cf094 EB |
1217 | struct user_namespace *t_user_ns; |
1218 | ||
1219 | rcu_read_lock(); | |
1220 | t_user_ns = task_cred_xxx(t, user_ns); | |
1221 | if (current_user_ns() != t_user_ns) { | |
1222 | kuid_t uid = make_kuid(current_user_ns(), info->si_uid); | |
1223 | info->si_uid = from_kuid_munged(t_user_ns, uid); | |
1224 | } | |
1225 | rcu_read_unlock(); | |
921cf9f6 | 1226 | |
8ad23dea EB |
1227 | /* A kernel generated signal? */ |
1228 | force = (info->si_code == SI_KERNEL); | |
1229 | ||
1230 | /* From an ancestor pid namespace? */ | |
1231 | if (!task_pid_nr_ns(current, task_active_pid_ns(t))) { | |
7a0cf094 | 1232 | info->si_pid = 0; |
8ad23dea EB |
1233 | force = true; |
1234 | } | |
7a0cf094 | 1235 | } |
8ad23dea | 1236 | return __send_signal(sig, info, t, type, force); |
7978b567 SB |
1237 | } |
1238 | ||
4aaefee5 | 1239 | static void print_fatal_signal(int signr) |
45807a1d | 1240 | { |
4aaefee5 | 1241 | struct pt_regs *regs = signal_pt_regs(); |
747800ef | 1242 | pr_info("potentially unexpected fatal signal %d.\n", signr); |
45807a1d | 1243 | |
ca5cd877 | 1244 | #if defined(__i386__) && !defined(__arch_um__) |
747800ef | 1245 | pr_info("code at %08lx: ", regs->ip); |
45807a1d IM |
1246 | { |
1247 | int i; | |
1248 | for (i = 0; i < 16; i++) { | |
1249 | unsigned char insn; | |
1250 | ||
b45c6e76 AK |
1251 | if (get_user(insn, (unsigned char *)(regs->ip + i))) |
1252 | break; | |
747800ef | 1253 | pr_cont("%02x ", insn); |
45807a1d IM |
1254 | } |
1255 | } | |
747800ef | 1256 | pr_cont("\n"); |
45807a1d | 1257 | #endif |
3a9f84d3 | 1258 | preempt_disable(); |
45807a1d | 1259 | show_regs(regs); |
3a9f84d3 | 1260 | preempt_enable(); |
45807a1d IM |
1261 | } |
1262 | ||
1263 | static int __init setup_print_fatal_signals(char *str) | |
1264 | { | |
1265 | get_option (&str, &print_fatal_signals); | |
1266 | ||
1267 | return 1; | |
1268 | } | |
1269 | ||
1270 | __setup("print-fatal-signals=", setup_print_fatal_signals); | |
1da177e4 | 1271 | |
4cd4b6d4 | 1272 | int |
ae7795bc | 1273 | __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) |
4cd4b6d4 | 1274 | { |
b213984b | 1275 | return send_signal(sig, info, p, PIDTYPE_TGID); |
4cd4b6d4 PE |
1276 | } |
1277 | ||
ae7795bc | 1278 | int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, |
40b3b025 | 1279 | enum pid_type type) |
4a30debf ON |
1280 | { |
1281 | unsigned long flags; | |
1282 | int ret = -ESRCH; | |
1283 | ||
1284 | if (lock_task_sighand(p, &flags)) { | |
b213984b | 1285 | ret = send_signal(sig, info, p, type); |
4a30debf ON |
1286 | unlock_task_sighand(p, &flags); |
1287 | } | |
1288 | ||
1289 | return ret; | |
1290 | } | |
1291 | ||
1da177e4 LT |
1292 | /* |
1293 | * Force a signal that the process can't ignore: if necessary | |
1294 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | |
ae74c3b6 LT |
1295 | * |
1296 | * Note: If we unblock the signal, we always reset it to SIG_DFL, | |
1297 | * since we do not want to have a signal handler that was blocked | |
1298 | * be invoked when user space had explicitly blocked it. | |
1299 | * | |
80fe728d ON |
1300 | * We don't want to have recursive SIGSEGV's etc, for example, |
1301 | * that is why we also clear SIGNAL_UNKILLABLE. | |
1da177e4 | 1302 | */ |
59c0e696 EB |
1303 | static int |
1304 | force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t) | |
1da177e4 LT |
1305 | { |
1306 | unsigned long int flags; | |
ae74c3b6 LT |
1307 | int ret, blocked, ignored; |
1308 | struct k_sigaction *action; | |
59c0e696 | 1309 | int sig = info->si_signo; |
1da177e4 LT |
1310 | |
1311 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
ae74c3b6 LT |
1312 | action = &t->sighand->action[sig-1]; |
1313 | ignored = action->sa.sa_handler == SIG_IGN; | |
1314 | blocked = sigismember(&t->blocked, sig); | |
1315 | if (blocked || ignored) { | |
1316 | action->sa.sa_handler = SIG_DFL; | |
1317 | if (blocked) { | |
1318 | sigdelset(&t->blocked, sig); | |
7bb44ade | 1319 | recalc_sigpending_and_wake(t); |
ae74c3b6 | 1320 | } |
1da177e4 | 1321 | } |
eb61b591 JI |
1322 | /* |
1323 | * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect | |
1324 | * debugging to leave init killable. | |
1325 | */ | |
1326 | if (action->sa.sa_handler == SIG_DFL && !t->ptrace) | |
80fe728d | 1327 | t->signal->flags &= ~SIGNAL_UNKILLABLE; |
b21c5bd5 | 1328 | ret = send_signal(sig, info, t, PIDTYPE_PID); |
1da177e4 LT |
1329 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
1330 | ||
1331 | return ret; | |
1332 | } | |
1333 | ||
a89e9b8a | 1334 | int force_sig_info(struct kernel_siginfo *info) |
59c0e696 | 1335 | { |
a89e9b8a | 1336 | return force_sig_info_to_task(info, current); |
59c0e696 EB |
1337 | } |
1338 | ||
1da177e4 LT |
1339 | /* |
1340 | * Nuke all other threads in the group. | |
1341 | */ | |
09faef11 | 1342 | int zap_other_threads(struct task_struct *p) |
1da177e4 | 1343 | { |
09faef11 ON |
1344 | struct task_struct *t = p; |
1345 | int count = 0; | |
1da177e4 | 1346 | |
1da177e4 LT |
1347 | p->signal->group_stop_count = 0; |
1348 | ||
09faef11 | 1349 | while_each_thread(p, t) { |
6dfca329 | 1350 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
09faef11 ON |
1351 | count++; |
1352 | ||
1353 | /* Don't bother with already dead threads */ | |
1da177e4 LT |
1354 | if (t->exit_state) |
1355 | continue; | |
1da177e4 | 1356 | sigaddset(&t->pending.signal, SIGKILL); |
1da177e4 LT |
1357 | signal_wake_up(t, 1); |
1358 | } | |
09faef11 ON |
1359 | |
1360 | return count; | |
1da177e4 LT |
1361 | } |
1362 | ||
b8ed374e NK |
1363 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
1364 | unsigned long *flags) | |
f63ee72e ON |
1365 | { |
1366 | struct sighand_struct *sighand; | |
1367 | ||
59dc6f3c | 1368 | rcu_read_lock(); |
f63ee72e ON |
1369 | for (;;) { |
1370 | sighand = rcu_dereference(tsk->sighand); | |
59dc6f3c | 1371 | if (unlikely(sighand == NULL)) |
f63ee72e | 1372 | break; |
59dc6f3c | 1373 | |
392809b2 ON |
1374 | /* |
1375 | * This sighand can be already freed and even reused, but | |
5f0d5a3a | 1376 | * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which |
392809b2 ON |
1377 | * initializes ->siglock: this slab can't go away, it has |
1378 | * the same object type, ->siglock can't be reinitialized. | |
1379 | * | |
1380 | * We need to ensure that tsk->sighand is still the same | |
1381 | * after we take the lock, we can race with de_thread() or | |
1382 | * __exit_signal(). In the latter case the next iteration | |
1383 | * must see ->sighand == NULL. | |
1384 | */ | |
59dc6f3c AMG |
1385 | spin_lock_irqsave(&sighand->siglock, *flags); |
1386 | if (likely(sighand == tsk->sighand)) | |
f63ee72e | 1387 | break; |
59dc6f3c | 1388 | spin_unlock_irqrestore(&sighand->siglock, *flags); |
f63ee72e | 1389 | } |
59dc6f3c | 1390 | rcu_read_unlock(); |
f63ee72e ON |
1391 | |
1392 | return sighand; | |
1393 | } | |
1394 | ||
c69e8d9c DH |
1395 | /* |
1396 | * send signal info to all the members of a group | |
c69e8d9c | 1397 | */ |
ae7795bc EB |
1398 | int group_send_sig_info(int sig, struct kernel_siginfo *info, |
1399 | struct task_struct *p, enum pid_type type) | |
1da177e4 | 1400 | { |
694f690d DH |
1401 | int ret; |
1402 | ||
1403 | rcu_read_lock(); | |
1404 | ret = check_kill_permission(sig, info, p); | |
1405 | rcu_read_unlock(); | |
f63ee72e | 1406 | |
4a30debf | 1407 | if (!ret && sig) |
40b3b025 | 1408 | ret = do_send_sig_info(sig, info, p, type); |
1da177e4 LT |
1409 | |
1410 | return ret; | |
1411 | } | |
1412 | ||
1413 | /* | |
146a505d | 1414 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1da177e4 | 1415 | * control characters do (^C, ^Z etc) |
c69e8d9c | 1416 | * - the caller must hold at least a readlock on tasklist_lock |
1da177e4 | 1417 | */ |
ae7795bc | 1418 | int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) |
1da177e4 LT |
1419 | { |
1420 | struct task_struct *p = NULL; | |
1421 | int retval, success; | |
1422 | ||
1da177e4 LT |
1423 | success = 0; |
1424 | retval = -ESRCH; | |
c4b92fc1 | 1425 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
01024980 | 1426 | int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); |
1da177e4 LT |
1427 | success |= !err; |
1428 | retval = err; | |
c4b92fc1 | 1429 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
1da177e4 LT |
1430 | return success ? 0 : retval; |
1431 | } | |
1432 | ||
ae7795bc | 1433 | int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) |
1da177e4 | 1434 | { |
d36174bc | 1435 | int error = -ESRCH; |
1da177e4 LT |
1436 | struct task_struct *p; |
1437 | ||
eca1a089 PM |
1438 | for (;;) { |
1439 | rcu_read_lock(); | |
1440 | p = pid_task(pid, PIDTYPE_PID); | |
1441 | if (p) | |
01024980 | 1442 | error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); |
eca1a089 PM |
1443 | rcu_read_unlock(); |
1444 | if (likely(!p || error != -ESRCH)) | |
1445 | return error; | |
6ca25b55 | 1446 | |
eca1a089 PM |
1447 | /* |
1448 | * The task was unhashed in between, try again. If it | |
1449 | * is dead, pid_task() will return NULL, if we race with | |
1450 | * de_thread() it will find the new leader. | |
1451 | */ | |
1452 | } | |
1da177e4 LT |
1453 | } |
1454 | ||
ae7795bc | 1455 | static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) |
c4b92fc1 EB |
1456 | { |
1457 | int error; | |
1458 | rcu_read_lock(); | |
b488893a | 1459 | error = kill_pid_info(sig, info, find_vpid(pid)); |
c4b92fc1 EB |
1460 | rcu_read_unlock(); |
1461 | return error; | |
1462 | } | |
1463 | ||
bb17fcca CB |
1464 | static inline bool kill_as_cred_perm(const struct cred *cred, |
1465 | struct task_struct *target) | |
d178bc3a SH |
1466 | { |
1467 | const struct cred *pcred = __task_cred(target); | |
bb17fcca CB |
1468 | |
1469 | return uid_eq(cred->euid, pcred->suid) || | |
1470 | uid_eq(cred->euid, pcred->uid) || | |
1471 | uid_eq(cred->uid, pcred->suid) || | |
1472 | uid_eq(cred->uid, pcred->uid); | |
d178bc3a SH |
1473 | } |
1474 | ||
70f1b0d3 EB |
1475 | /* |
1476 | * The usb asyncio usage of siginfo is wrong. The glibc support | |
1477 | * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT. | |
1478 | * AKA after the generic fields: | |
1479 | * kernel_pid_t si_pid; | |
1480 | * kernel_uid32_t si_uid; | |
1481 | * sigval_t si_value; | |
1482 | * | |
1483 | * Unfortunately when usb generates SI_ASYNCIO it assumes the layout | |
1484 | * after the generic fields is: | |
1485 | * void __user *si_addr; | |
1486 | * | |
1487 | * This is a practical problem when there is a 64bit big endian kernel | |
1488 | * and a 32bit userspace. As the 32bit address will be encoded in the low | |
1489 | * 32bits of the pointer. Those low 32bits will be stored at a higher | |
1490 | * address than they would appear in a 32 bit pointer. So userspace will not | |
1491 | * see the address it was expecting for its completions. | |
1492 | * | |
1493 | * There is nothing in the encoding that can allow | |
1494 | * copy_siginfo_to_user32 to detect this confusion of formats, so | |
1495 | * handle this by requiring the caller of kill_pid_usb_asyncio to | |
1496 | * notice when this situation takes place and to store the 32bit | |
1497 | * pointer in sival_int, instead of sival_ptr of the sigval_t addr | |
1498 | * parameter. | |
1499 | */ | |
1500 | int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, | |
1501 | struct pid *pid, const struct cred *cred) | |
46113830 | 1502 | { |
70f1b0d3 | 1503 | struct kernel_siginfo info; |
46113830 | 1504 | struct task_struct *p; |
14d8c9f3 | 1505 | unsigned long flags; |
70f1b0d3 EB |
1506 | int ret = -EINVAL; |
1507 | ||
1508 | clear_siginfo(&info); | |
1509 | info.si_signo = sig; | |
1510 | info.si_errno = errno; | |
1511 | info.si_code = SI_ASYNCIO; | |
1512 | *((sigval_t *)&info.si_pid) = addr; | |
46113830 HW |
1513 | |
1514 | if (!valid_signal(sig)) | |
1515 | return ret; | |
1516 | ||
14d8c9f3 | 1517 | rcu_read_lock(); |
2425c08b | 1518 | p = pid_task(pid, PIDTYPE_PID); |
46113830 HW |
1519 | if (!p) { |
1520 | ret = -ESRCH; | |
1521 | goto out_unlock; | |
1522 | } | |
70f1b0d3 | 1523 | if (!kill_as_cred_perm(cred, p)) { |
46113830 HW |
1524 | ret = -EPERM; |
1525 | goto out_unlock; | |
1526 | } | |
70f1b0d3 | 1527 | ret = security_task_kill(p, &info, sig, cred); |
8f95dc58 DQ |
1528 | if (ret) |
1529 | goto out_unlock; | |
14d8c9f3 TG |
1530 | |
1531 | if (sig) { | |
1532 | if (lock_task_sighand(p, &flags)) { | |
8ad23dea | 1533 | ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false); |
14d8c9f3 TG |
1534 | unlock_task_sighand(p, &flags); |
1535 | } else | |
1536 | ret = -ESRCH; | |
46113830 HW |
1537 | } |
1538 | out_unlock: | |
14d8c9f3 | 1539 | rcu_read_unlock(); |
46113830 HW |
1540 | return ret; |
1541 | } | |
70f1b0d3 | 1542 | EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio); |
1da177e4 LT |
1543 | |
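/*
 * Illustrative sketch, not part of signal.c: how a caller is expected to
 * prepare the sigval_t argument for kill_pid_usb_asyncio() given the
 * layout problem described above.  For a 32-bit consumer the address is
 * stored in sival_int so the low 32 bits land where a 32-bit sigval_t
 * expects them; otherwise the pointer is stored normally.  The function
 * name and its parameters are hypothetical.
 */
static int example_signal_usb_completion(int sig, int errno,
					 void __user *user_addr,
					 bool compat_consumer,
					 struct pid *pid,
					 const struct cred *cred)
{
	sigval_t addr;

	/* hypothetical caller, for illustration only */
	if (compat_consumer)
		addr.sival_int = (u32)(unsigned long)user_addr;
	else
		addr.sival_ptr = user_addr;

	return kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
}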
1544 | /* | |
1545 | * kill_something_info() interprets pid in interesting ways just like kill(2). | |
1546 | * | |
1547 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | |
1548 | * is probably wrong. Should make it like BSD or SYSV. | |
1549 | */ | |
1550 | ||
ae7795bc | 1551 | static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid) |
1da177e4 | 1552 | { |
8d42db18 | 1553 | int ret; |
d5df763b PE |
1554 | |
1555 | if (pid > 0) { | |
1556 | rcu_read_lock(); | |
1557 | ret = kill_pid_info(sig, info, find_vpid(pid)); | |
1558 | rcu_read_unlock(); | |
1559 | return ret; | |
1560 | } | |
1561 | ||
4ea77014 | 1562 | /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ |
1563 | if (pid == INT_MIN) | |
1564 | return -ESRCH; | |
1565 | ||
d5df763b PE |
1566 | read_lock(&tasklist_lock); |
1567 | if (pid != -1) { | |
1568 | ret = __kill_pgrp_info(sig, info, | |
1569 | pid ? find_vpid(-pid) : task_pgrp(current)); | |
1570 | } else { | |
1da177e4 LT |
1571 | int retval = 0, count = 0; |
1572 | struct task_struct * p; | |
1573 | ||
1da177e4 | 1574 | for_each_process(p) { |
d25141a8 SB |
1575 | if (task_pid_vnr(p) > 1 && |
1576 | !same_thread_group(p, current)) { | |
01024980 EB |
1577 | int err = group_send_sig_info(sig, info, p, |
1578 | PIDTYPE_MAX); | |
1da177e4 LT |
1579 | ++count; |
1580 | if (err != -EPERM) | |
1581 | retval = err; | |
1582 | } | |
1583 | } | |
8d42db18 | 1584 | ret = count ? retval : -ESRCH; |
1da177e4 | 1585 | } |
d5df763b PE |
1586 | read_unlock(&tasklist_lock); |
1587 | ||
8d42db18 | 1588 | return ret; |
1da177e4 LT |
1589 | } |
1590 | ||
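/*
 * Illustrative sketch, not part of signal.c: how a kill(2)-style entry
 * point would fill in an SI_USER siginfo before handing the pid argument
 * to kill_something_info(), which applies the usual kill(2) rules
 * (pid > 0: one process; pid == 0: the caller's process group;
 * pid == -1: everything except init and ourselves; pid < -1: the
 * process group -pid).  The helper name is hypothetical.
 */
static int example_do_kill(pid_t pid, int sig)
{
	struct kernel_siginfo info;

	/* hypothetical wrapper, for illustration only */
	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}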
1591 | /* | |
1592 | * These are for backward compatibility with the rest of the kernel source. | |
1593 | */ | |
1594 | ||
ae7795bc | 1595 | int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) |
1da177e4 | 1596 | { |
1da177e4 LT |
1597 | /* |
1598 | * Make sure legacy kernel users don't send in bad values | |
1599 | * (normal paths check this in check_kill_permission). | |
1600 | */ | |
7ed20e1a | 1601 | if (!valid_signal(sig)) |
1da177e4 LT |
1602 | return -EINVAL; |
1603 | ||
40b3b025 | 1604 | return do_send_sig_info(sig, info, p, PIDTYPE_PID); |
1da177e4 | 1605 | } |
fb50f5a4 | 1606 | EXPORT_SYMBOL(send_sig_info); |
1da177e4 | 1607 | |
b67a1b9e ON |
1608 | #define __si_special(priv) \ |
1609 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | |
1610 | ||
1da177e4 LT |
1611 | int |
1612 | send_sig(int sig, struct task_struct *p, int priv) | |
1613 | { | |
b67a1b9e | 1614 | return send_sig_info(sig, __si_special(priv), p); |
1da177e4 | 1615 | } |
fb50f5a4 | 1616 | EXPORT_SYMBOL(send_sig); |
1da177e4 | 1617 | |
3cf5d076 | 1618 | void force_sig(int sig) |
1da177e4 | 1619 | { |
ffafd23b EB |
1620 | struct kernel_siginfo info; |
1621 | ||
1622 | clear_siginfo(&info); | |
1623 | info.si_signo = sig; | |
1624 | info.si_errno = 0; | |
1625 | info.si_code = SI_KERNEL; | |
1626 | info.si_pid = 0; | |
1627 | info.si_uid = 0; | |
a89e9b8a | 1628 | force_sig_info(&info); |
1da177e4 | 1629 | } |
fb50f5a4 | 1630 | EXPORT_SYMBOL(force_sig); |
1da177e4 LT |
1631 | |
1632 | /* | |
1633 | * When things go south during signal handling, we | |
1634 | * will force a SIGSEGV. And if the signal that caused | |
1635 | * the problem was already a SIGSEGV, we'll want to | |
1636 | * make sure we don't even try to deliver the signal.. | |
1637 | */ | |
cb44c9a0 | 1638 | void force_sigsegv(int sig) |
1da177e4 | 1639 | { |
cb44c9a0 EB |
1640 | struct task_struct *p = current; |
1641 | ||
1da177e4 LT |
1642 | if (sig == SIGSEGV) { |
1643 | unsigned long flags; | |
1644 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1645 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | |
1646 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1647 | } | |
3cf5d076 | 1648 | force_sig(SIGSEGV); |
1da177e4 LT |
1649 | } |
1650 | ||
91ca180d | 1651 | int force_sig_fault_to_task(int sig, int code, void __user *addr |
f8ec6601 EB |
1652 | ___ARCH_SI_TRAPNO(int trapno) |
1653 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | |
1654 | , struct task_struct *t) | |
1655 | { | |
ae7795bc | 1656 | struct kernel_siginfo info; |
f8ec6601 EB |
1657 | |
1658 | clear_siginfo(&info); | |
1659 | info.si_signo = sig; | |
1660 | info.si_errno = 0; | |
1661 | info.si_code = code; | |
1662 | info.si_addr = addr; | |
1663 | #ifdef __ARCH_SI_TRAPNO | |
1664 | info.si_trapno = trapno; | |
1665 | #endif | |
1666 | #ifdef __ia64__ | |
1667 | info.si_imm = imm; | |
1668 | info.si_flags = flags; | |
1669 | info.si_isr = isr; | |
1670 | #endif | |
59c0e696 | 1671 | return force_sig_info_to_task(&info, t); |
f8ec6601 EB |
1672 | } |
1673 | ||
91ca180d EB |
1674 | int force_sig_fault(int sig, int code, void __user *addr |
1675 | ___ARCH_SI_TRAPNO(int trapno) | |
2e1661d2 | 1676 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)) |
91ca180d EB |
1677 | { |
1678 | return force_sig_fault_to_task(sig, code, addr | |
1679 | ___ARCH_SI_TRAPNO(trapno) | |
2e1661d2 | 1680 | ___ARCH_SI_IA64(imm, flags, isr), current); |
f8ec6601 EB |
1681 | } |
1682 | ||
1683 | int send_sig_fault(int sig, int code, void __user *addr | |
1684 | ___ARCH_SI_TRAPNO(int trapno) | |
1685 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | |
1686 | , struct task_struct *t) | |
1687 | { | |
ae7795bc | 1688 | struct kernel_siginfo info; |
f8ec6601 EB |
1689 | |
1690 | clear_siginfo(&info); | |
1691 | info.si_signo = sig; | |
1692 | info.si_errno = 0; | |
1693 | info.si_code = code; | |
1694 | info.si_addr = addr; | |
1695 | #ifdef __ARCH_SI_TRAPNO | |
1696 | info.si_trapno = trapno; | |
1697 | #endif | |
1698 | #ifdef __ia64__ | |
1699 | info.si_imm = imm; | |
1700 | info.si_flags = flags; | |
1701 | info.si_isr = isr; | |
1702 | #endif | |
1703 | return send_sig_info(info.si_signo, &info, t); | |
1704 | } | |
1705 | ||
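/*
 * Illustrative sketch, not part of signal.c: an architecture's page-fault
 * handler typically reports an unresolvable user-space access through
 * force_sig_fault().  The function and variable names are hypothetical,
 * and the ___ARCH_SI_* extra arguments are omitted (an architecture that
 * defines neither __ARCH_SI_TRAPNO nor __ia64__ is assumed).
 */
static void example_report_bad_access(unsigned long fault_address)
{
	/* hypothetical fault path; SEGV_MAPERR: address not mapped */
	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)fault_address);
}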
f8eac901 | 1706 | int force_sig_mceerr(int code, void __user *addr, short lsb) |
38246735 | 1707 | { |
ae7795bc | 1708 | struct kernel_siginfo info; |
38246735 EB |
1709 | |
1710 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1711 | clear_siginfo(&info); | |
1712 | info.si_signo = SIGBUS; | |
1713 | info.si_errno = 0; | |
1714 | info.si_code = code; | |
1715 | info.si_addr = addr; | |
1716 | info.si_addr_lsb = lsb; | |
a89e9b8a | 1717 | return force_sig_info(&info); |
38246735 EB |
1718 | } |
1719 | ||
1720 | int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) | |
1721 | { | |
ae7795bc | 1722 | struct kernel_siginfo info; |
38246735 EB |
1723 | |
1724 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1725 | clear_siginfo(&info); | |
1726 | info.si_signo = SIGBUS; | |
1727 | info.si_errno = 0; | |
1728 | info.si_code = code; | |
1729 | info.si_addr = addr; | |
1730 | info.si_addr_lsb = lsb; | |
1731 | return send_sig_info(info.si_signo, &info, t); | |
1732 | } | |
1733 | EXPORT_SYMBOL(send_sig_mceerr); | |
38246735 | 1734 | |
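/*
 * Illustrative sketch, not part of signal.c: how a memory-failure path
 * might report a poisoned page to the task that touched it.  si_addr_lsb
 * carries the size of the corrupted region as a power of two; a whole
 * page is assumed here.  The function and variable names are hypothetical.
 */
static void example_report_poison(void __user *poisoned_addr)
{
	/* hypothetical caller; BUS_MCEERR_AR: action required, the
	 * faulting access has already happened */
	force_sig_mceerr(BUS_MCEERR_AR, poisoned_addr, PAGE_SHIFT);
}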
38246735 EB |
1735 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) |
1736 | { | |
ae7795bc | 1737 | struct kernel_siginfo info; |
38246735 EB |
1738 | |
1739 | clear_siginfo(&info); | |
1740 | info.si_signo = SIGSEGV; | |
1741 | info.si_errno = 0; | |
1742 | info.si_code = SEGV_BNDERR; | |
1743 | info.si_addr = addr; | |
1744 | info.si_lower = lower; | |
1745 | info.si_upper = upper; | |
a89e9b8a | 1746 | return force_sig_info(&info); |
38246735 | 1747 | } |
38246735 EB |
1748 | |
1749 | #ifdef SEGV_PKUERR | |
1750 | int force_sig_pkuerr(void __user *addr, u32 pkey) | |
1751 | { | |
ae7795bc | 1752 | struct kernel_siginfo info; |
38246735 EB |
1753 | |
1754 | clear_siginfo(&info); | |
1755 | info.si_signo = SIGSEGV; | |
1756 | info.si_errno = 0; | |
1757 | info.si_code = SEGV_PKUERR; | |
1758 | info.si_addr = addr; | |
1759 | info.si_pkey = pkey; | |
a89e9b8a | 1760 | return force_sig_info(&info); |
38246735 EB |
1761 | } |
1762 | #endif | |
f8ec6601 | 1763 | |
f71dd7dc EB |
1764 | /* For the crazy architectures that include trap information in |
1765 | * the errno field, instead of an actual errno value. | |
1766 | */ | |
1767 | int force_sig_ptrace_errno_trap(int errno, void __user *addr) | |
1768 | { | |
ae7795bc | 1769 | struct kernel_siginfo info; |
f71dd7dc EB |
1770 | |
1771 | clear_siginfo(&info); | |
1772 | info.si_signo = SIGTRAP; | |
1773 | info.si_errno = errno; | |
1774 | info.si_code = TRAP_HWBKPT; | |
1775 | info.si_addr = addr; | |
a89e9b8a | 1776 | return force_sig_info(&info); |
f71dd7dc EB |
1777 | } |
1778 | ||
c4b92fc1 EB |
1779 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1780 | { | |
146a505d PE |
1781 | int ret; |
1782 | ||
1783 | read_lock(&tasklist_lock); | |
1784 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); | |
1785 | read_unlock(&tasklist_lock); | |
1786 | ||
1787 | return ret; | |
c4b92fc1 EB |
1788 | } |
1789 | EXPORT_SYMBOL(kill_pgrp); | |
1790 | ||
1791 | int kill_pid(struct pid *pid, int sig, int priv) | |
1792 | { | |
1793 | return kill_pid_info(sig, __si_special(priv), pid); | |
1794 | } | |
1795 | EXPORT_SYMBOL(kill_pid); | |
1796 | ||
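/*
 * Illustrative sketch, not part of signal.c: kill_pgrp() is the helper a
 * tty driver would use to deliver job-control signals, e.g. SIGINT on ^C,
 * to the foreground process group.  The function name and the way the
 * struct pid is obtained are hypothetical; priv = 1 marks the signal as
 * coming from the kernel rather than from another user process.
 */
static void example_send_intr(struct pid *foreground_pgrp)
{
	/* hypothetical tty-style caller, for illustration only */
	if (foreground_pgrp)
		kill_pgrp(foreground_pgrp, SIGINT, 1);
}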
1da177e4 LT |
1797 | /* |
1798 | * These functions support sending signals using preallocated sigqueue | |
1799 | * structures. This is needed "because realtime applications cannot | |
1800 | * afford to lose notifications of asynchronous events, like timer | |
5aba085e | 1801 | * expirations or I/O completions". In the case of POSIX Timers |
1da177e4 LT |
1802 | * we allocate the sigqueue structure from timer_create(). If this | |
1803 | * allocation fails we are able to report the failure to the application | |
1804 | * with an EAGAIN error. | |
1805 | */ | |
1da177e4 LT |
1806 | struct sigqueue *sigqueue_alloc(void) |
1807 | { | |
f84d49b2 | 1808 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1da177e4 | 1809 | |
f84d49b2 | 1810 | if (q) |
1da177e4 | 1811 | q->flags |= SIGQUEUE_PREALLOC; |
f84d49b2 NO |
1812 | |
1813 | return q; | |
1da177e4 LT |
1814 | } |
1815 | ||
1816 | void sigqueue_free(struct sigqueue *q) | |
1817 | { | |
1818 | unsigned long flags; | |
60187d27 ON |
1819 | spinlock_t *lock = ¤t->sighand->siglock; |
1820 | ||
1da177e4 LT |
1821 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1822 | /* | |
c8e85b4f ON |
1823 | * We must hold ->siglock while testing q->list |
1824 | * to serialize with collect_signal() or with | |
da7978b0 | 1825 | * __exit_signal()->flush_sigqueue(). |
1da177e4 | 1826 | */ |
60187d27 | 1827 | spin_lock_irqsave(lock, flags); |
c8e85b4f ON |
1828 | q->flags &= ~SIGQUEUE_PREALLOC; |
1829 | /* | |
1830 | * If it is queued it will be freed when dequeued, | |
1831 | * like the "regular" sigqueue. | |
1832 | */ | |
60187d27 | 1833 | if (!list_empty(&q->list)) |
c8e85b4f | 1834 | q = NULL; |
60187d27 ON |
1835 | spin_unlock_irqrestore(lock, flags); |
1836 | ||
c8e85b4f ON |
1837 | if (q) |
1838 | __sigqueue_free(q); | |
1da177e4 LT |
1839 | } |
1840 | ||
24122c7f | 1841 | int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type) |
9e3bd6c3 | 1842 | { |
e62e6650 | 1843 | int sig = q->info.si_signo; |
2ca3515a | 1844 | struct sigpending *pending; |
24122c7f | 1845 | struct task_struct *t; |
e62e6650 | 1846 | unsigned long flags; |
163566f6 | 1847 | int ret, result; |
2ca3515a | 1848 | |
4cd4b6d4 | 1849 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
e62e6650 ON |
1850 | |
1851 | ret = -1; | |
24122c7f EB |
1852 | rcu_read_lock(); |
1853 | t = pid_task(pid, type); | |
1854 | if (!t || !likely(lock_task_sighand(t, &flags))) | |
e62e6650 ON |
1855 | goto ret; |
1856 | ||
7e695a5e | 1857 | ret = 1; /* the signal is ignored */ |
163566f6 | 1858 | result = TRACE_SIGNAL_IGNORED; |
def8cf72 | 1859 | if (!prepare_signal(sig, t, false)) |
e62e6650 ON |
1860 | goto out; |
1861 | ||
1862 | ret = 0; | |
9e3bd6c3 PE |
1863 | if (unlikely(!list_empty(&q->list))) { |
1864 | /* | |
1865 | * If an SI_TIMER entry is already queued, just increment | |
1866 | * the overrun count. | |
1867 | */ | |
9e3bd6c3 PE |
1868 | BUG_ON(q->info.si_code != SI_TIMER); |
1869 | q->info.si_overrun++; | |
163566f6 | 1870 | result = TRACE_SIGNAL_ALREADY_PENDING; |
e62e6650 | 1871 | goto out; |
9e3bd6c3 | 1872 | } |
ba661292 | 1873 | q->info.si_overrun = 0; |
9e3bd6c3 | 1874 | |
9e3bd6c3 | 1875 | signalfd_notify(t, sig); |
24122c7f | 1876 | pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; |
9e3bd6c3 PE |
1877 | list_add_tail(&q->list, &pending->list); |
1878 | sigaddset(&pending->signal, sig); | |
07296149 | 1879 | complete_signal(sig, t, type); |
163566f6 | 1880 | result = TRACE_SIGNAL_DELIVERED; |
e62e6650 | 1881 | out: |
24122c7f | 1882 | trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result); |
e62e6650 ON |
1883 | unlock_task_sighand(t, &flags); |
1884 | ret: | |
24122c7f | 1885 | rcu_read_unlock(); |
e62e6650 | 1886 | return ret; |
9e3bd6c3 PE |
1887 | } |
1888 | ||
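/*
 * Illustrative sketch, not part of signal.c: the preallocated-sigqueue
 * life cycle described above, roughly as a POSIX timer uses it.  The
 * entry is allocated once at creation time (so expiry can never fail
 * with EAGAIN), re-sent on every expiry, and released when the timer
 * goes away.  The struct and function names are hypothetical; the queued
 * siginfo (si_signo, si_code = SI_TIMER, si_value) and a reference on
 * the target pid are assumed to have been set up at creation time, as
 * send_sigqueue() requires.
 */
struct example_timer {
	struct sigqueue *sigq;	/* hypothetical container */
	struct pid *target;
};

static int example_timer_create(struct example_timer *t)
{
	t->sigq = sigqueue_alloc();
	return t->sigq ? 0 : -EAGAIN;
}

static void example_timer_expire(struct example_timer *t)
{
	/* re-queues the same entry; if it is still pending from a
	 * previous expiry only the overrun count is incremented */
	send_sigqueue(t->sigq, t->target, PIDTYPE_TGID);
}

static void example_timer_destroy(struct example_timer *t)
{
	sigqueue_free(t->sigq);
}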
b53b0b9d JFG |
1889 | static void do_notify_pidfd(struct task_struct *task) |
1890 | { | |
1891 | struct pid *pid; | |
1892 | ||
1caf7d50 | 1893 | WARN_ON(task->exit_state == 0); |
b53b0b9d JFG |
1894 | pid = task_pid(task); |
1895 | wake_up_all(&pid->wait_pidfd); | |
1896 | } | |
1897 | ||
1da177e4 LT |
1898 | /* |
1899 | * Let a parent know about the death of a child. | |
1900 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | |
2b2a1ff6 | 1901 | * |
53c8f9f1 ON |
1902 | * Returns true if our parent ignored us and so we've switched to |
1903 | * self-reaping. | |
1da177e4 | 1904 | */ |
53c8f9f1 | 1905 | bool do_notify_parent(struct task_struct *tsk, int sig) |
1da177e4 | 1906 | { |
ae7795bc | 1907 | struct kernel_siginfo info; |
1da177e4 LT |
1908 | unsigned long flags; |
1909 | struct sighand_struct *psig; | |
53c8f9f1 | 1910 | bool autoreap = false; |
bde8285e | 1911 | u64 utime, stime; |
1da177e4 LT |
1912 | |
1913 | BUG_ON(sig == -1); | |
1914 | ||
1915 | /* do_notify_parent_cldstop should have been called instead. */ | |
e1abb39c | 1916 | BUG_ON(task_is_stopped_or_traced(tsk)); |
1da177e4 | 1917 | |
d21142ec | 1918 | BUG_ON(!tsk->ptrace && |
1da177e4 LT |
1919 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
1920 | ||
b53b0b9d JFG |
1921 | /* Wake up all pidfd waiters */ |
1922 | do_notify_pidfd(tsk); | |
1923 | ||
b6e238dc ON |
1924 | if (sig != SIGCHLD) { |
1925 | /* | |
1926 | * This is only possible if parent == real_parent. | |
1927 | * Check if it has changed security domain. | |
1928 | */ | |
1929 | if (tsk->parent_exec_id != tsk->parent->self_exec_id) | |
1930 | sig = SIGCHLD; | |
1931 | } | |
1932 | ||
faf1f22b | 1933 | clear_siginfo(&info); |
1da177e4 LT |
1934 | info.si_signo = sig; |
1935 | info.si_errno = 0; | |
b488893a | 1936 | /* |
32084504 EB |
1937 | * We are under tasklist_lock here so our parent is tied to |
1938 | * us and cannot change. | |
b488893a | 1939 | * |
32084504 EB |
1940 | * task_active_pid_ns will always return the same pid namespace |
1941 | * until a task passes through release_task. | |
b488893a PE |
1942 | * |
1943 | * write_lock() currently calls preempt_disable() which is the | |
1944 | * same as rcu_read_lock(), but according to Oleg it is not | |
1945 | * correct to rely on this. | |
1946 | */ | |
1947 | rcu_read_lock(); | |
32084504 | 1948 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); |
54ba47ed EB |
1949 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), |
1950 | task_uid(tsk)); | |
b488893a PE |
1951 | rcu_read_unlock(); |
1952 | ||
bde8285e FW |
1953 | task_cputime(tsk, &utime, &stime); |
1954 | info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); | |
1955 | info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); | |
1da177e4 LT |
1956 | |
1957 | info.si_status = tsk->exit_code & 0x7f; | |
1958 | if (tsk->exit_code & 0x80) | |
1959 | info.si_code = CLD_DUMPED; | |
1960 | else if (tsk->exit_code & 0x7f) | |
1961 | info.si_code = CLD_KILLED; | |
1962 | else { | |
1963 | info.si_code = CLD_EXITED; | |
1964 | info.si_status = tsk->exit_code >> 8; | |
1965 | } | |
1966 | ||
1967 | psig = tsk->parent->sighand; | |
1968 | spin_lock_irqsave(&psig->siglock, flags); | |
d21142ec | 1969 | if (!tsk->ptrace && sig == SIGCHLD && |
1da177e4 LT |
1970 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1971 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | |
1972 | /* | |
1973 | * We are exiting and our parent doesn't care. POSIX.1 | |
1974 | * defines special semantics for setting SIGCHLD to SIG_IGN | |
1975 | * or setting the SA_NOCLDWAIT flag: we should be reaped | |
1976 | * automatically and not left for our parent's wait4 call. | |
1977 | * Rather than having the parent do it as a magic kind of | |
1978 | * signal handler, we just set this to tell do_exit that we | |
1979 | * can be cleaned up without becoming a zombie. Note that | |
1980 | * we still call __wake_up_parent in this case, because a | |
1981 | * blocked sys_wait4 might now return -ECHILD. | |
1982 | * | |
1983 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | |
1984 | * is implementation-defined: we do (if you don't want | |
1985 | * it, just use SIG_IGN instead). | |
1986 | */ | |
53c8f9f1 | 1987 | autoreap = true; |
1da177e4 | 1988 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
53c8f9f1 | 1989 | sig = 0; |
1da177e4 | 1990 | } |
53c8f9f1 | 1991 | if (valid_signal(sig) && sig) |
1da177e4 LT |
1992 | __group_send_sig_info(sig, &info, tsk->parent); |
1993 | __wake_up_parent(tsk, tsk->parent); | |
1994 | spin_unlock_irqrestore(&psig->siglock, flags); | |
2b2a1ff6 | 1995 | |
53c8f9f1 | 1996 | return autoreap; |
1da177e4 LT |
1997 | } |
1998 | ||
75b95953 TH |
1999 | /** |
2000 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
2001 | * @tsk: task reporting the state change | |
2002 | * @for_ptracer: the notification is for ptracer | |
2003 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
2004 | * | |
2005 | * Notify @tsk's parent that the stopped/continued state has changed. If | |
2006 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. | |
2007 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
2008 | * | |
2009 | * CONTEXT: | |
2010 | * Must be called with tasklist_lock at least read locked. | |
2011 | */ | |
2012 | static void do_notify_parent_cldstop(struct task_struct *tsk, | |
2013 | bool for_ptracer, int why) | |
1da177e4 | 2014 | { |
ae7795bc | 2015 | struct kernel_siginfo info; |
1da177e4 | 2016 | unsigned long flags; |
bc505a47 | 2017 | struct task_struct *parent; |
1da177e4 | 2018 | struct sighand_struct *sighand; |
bde8285e | 2019 | u64 utime, stime; |
1da177e4 | 2020 | |
75b95953 | 2021 | if (for_ptracer) { |
bc505a47 | 2022 | parent = tsk->parent; |
75b95953 | 2023 | } else { |
bc505a47 ON |
2024 | tsk = tsk->group_leader; |
2025 | parent = tsk->real_parent; | |
2026 | } | |
2027 | ||
faf1f22b | 2028 | clear_siginfo(&info); |
1da177e4 LT |
2029 | info.si_signo = SIGCHLD; |
2030 | info.si_errno = 0; | |
b488893a | 2031 | /* |
5aba085e | 2032 | * see comment in do_notify_parent() about the following 4 lines |
b488893a PE |
2033 | */ |
2034 | rcu_read_lock(); | |
17cf22c3 | 2035 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); |
54ba47ed | 2036 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); |
b488893a PE |
2037 | rcu_read_unlock(); |
2038 | ||
bde8285e FW |
2039 | task_cputime(tsk, &utime, &stime); |
2040 | info.si_utime = nsec_to_clock_t(utime); | |
2041 | info.si_stime = nsec_to_clock_t(stime); | |
1da177e4 LT |
2042 | |
2043 | info.si_code = why; | |
2044 | switch (why) { | |
2045 | case CLD_CONTINUED: | |
2046 | info.si_status = SIGCONT; | |
2047 | break; | |
2048 | case CLD_STOPPED: | |
2049 | info.si_status = tsk->signal->group_exit_code & 0x7f; | |
2050 | break; | |
2051 | case CLD_TRAPPED: | |
2052 | info.si_status = tsk->exit_code & 0x7f; | |
2053 | break; | |
2054 | default: | |
2055 | BUG(); | |
2056 | } | |
2057 | ||
2058 | sighand = parent->sighand; | |
2059 | spin_lock_irqsave(&sighand->siglock, flags); | |
2060 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | |
2061 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | |
2062 | __group_send_sig_info(SIGCHLD, &info, parent); | |
2063 | /* | |
2064 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | |
2065 | */ | |
2066 | __wake_up_parent(tsk, parent); | |
2067 | spin_unlock_irqrestore(&sighand->siglock, flags); | |
2068 | } | |
2069 | ||
6527de95 | 2070 | static inline bool may_ptrace_stop(void) |
d5f70c00 | 2071 | { |
d21142ec | 2072 | if (!likely(current->ptrace)) |
6527de95 | 2073 | return false; |
d5f70c00 ON |
2074 | /* |
2075 | * Are we in the middle of do_coredump? | |
2076 | * If so, and our tracer is also part of the coredump, stopping | |
2077 | * is a deadlock situation, and pointless because our tracer | |
2078 | * is dead, so don't allow us to stop. | |
2079 | * If SIGKILL was already sent before the caller unlocked | |
999d9fc1 | 2080 | * ->siglock we must see ->core_state != NULL. Otherwise it |
d5f70c00 | 2081 | * is safe to enter schedule(). |
9899d11f ON |
2082 | * |
2083 | * This is almost outdated, a task with a pending SIGKILL can't | |
2084 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | |
2085 | * after SIGKILL was already dequeued. | |
d5f70c00 | 2086 | */ |
999d9fc1 | 2087 | if (unlikely(current->mm->core_state) && |
d5f70c00 | 2088 | unlikely(current->mm == current->parent->mm)) |
6527de95 | 2089 | return false; |
d5f70c00 | 2090 | |
6527de95 | 2091 | return true; |
d5f70c00 ON |
2092 | } |
2093 | ||
1a669c2f | 2094 | /* |
5aba085e | 2095 | * Return true if there is a SIGKILL that should be waking us up. | |
1a669c2f RM |
2096 | * Called with the siglock held. |
2097 | */ | |
f99e9d8c | 2098 | static bool sigkill_pending(struct task_struct *tsk) |
1a669c2f | 2099 | { |
f99e9d8c CB |
2100 | return sigismember(&tsk->pending.signal, SIGKILL) || |
2101 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | |
1a669c2f RM |
2102 | } |
2103 | ||
1da177e4 LT |
2104 | /* |
2105 | * This must be called with current->sighand->siglock held. | |
2106 | * | |
2107 | * This should be the path for all ptrace stops. | |
2108 | * We always set current->last_siginfo while stopped here. | |
2109 | * That makes it a way to test a stopped process for | |
2110 | * being ptrace-stopped vs being job-control-stopped. | |
2111 | * | |
20686a30 ON |
2112 | * If we actually decide not to stop at all because the tracer |
2113 | * is gone, we keep current->exit_code unless clear_code. | |
1da177e4 | 2114 | */ |
ae7795bc | 2115 | static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) |
b8401150 NK |
2116 | __releases(¤t->sighand->siglock) |
2117 | __acquires(¤t->sighand->siglock) | |
1da177e4 | 2118 | { |
ceb6bd67 TH |
2119 | bool gstop_done = false; |
2120 | ||
1a669c2f RM |
2121 | if (arch_ptrace_stop_needed(exit_code, info)) { |
2122 | /* | |
2123 | * The arch code has something special to do before a | |
2124 | * ptrace stop. This is allowed to block, e.g. for faults | |
2125 | * on user stack pages. We can't keep the siglock while | |
2126 | * calling arch_ptrace_stop, so we must release it now. | |
2127 | * To preserve proper semantics, we must do this before | |
2128 | * any signal bookkeeping like checking group_stop_count. | |
2129 | * Meanwhile, a SIGKILL could come in before we retake the | |
2130 | * siglock. That must prevent us from sleeping in TASK_TRACED. | |
2131 | * So after regaining the lock, we must check for SIGKILL. | |
2132 | */ | |
2133 | spin_unlock_irq(¤t->sighand->siglock); | |
2134 | arch_ptrace_stop(exit_code, info); | |
2135 | spin_lock_irq(¤t->sighand->siglock); | |
3d749b9e ON |
2136 | if (sigkill_pending(current)) |
2137 | return; | |
1a669c2f RM |
2138 | } |
2139 | ||
b5bf9a90 PZ |
2140 | set_special_state(TASK_TRACED); |
2141 | ||
1da177e4 | 2142 | /* |
81be24b8 TH |
2143 | * We're committing to trapping. TRACED should be visible before |
2144 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | |
2145 | * Also, transition to TRACED and updates to ->jobctl should be | |
2146 | * atomic with respect to siglock and should be done after the arch | |
2147 | * hook as siglock is released and regrabbed across it. | |
b5bf9a90 PZ |
2148 | * |
2149 | * TRACER TRACEE | |
2150 | * | |
2151 | * ptrace_attach() | |
2152 | * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) | |
2153 | * do_wait() | |
2154 | * set_current_state() smp_wmb(); | |
2155 | * ptrace_do_wait() | |
2156 | * wait_task_stopped() | |
2157 | * task_stopped_code() | |
2158 | * [L] task_is_traced() [S] task_clear_jobctl_trapping(); | |
1da177e4 | 2159 | */ |
b5bf9a90 | 2160 | smp_wmb(); |
1da177e4 LT |
2161 | |
2162 | current->last_siginfo = info; | |
2163 | current->exit_code = exit_code; | |
2164 | ||
d79fdd6d | 2165 | /* |
0ae8ce1c TH |
2166 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
2167 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered | |
73ddff2b TH |
2168 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
2169 | * could be clear now. We act as if SIGCONT is received after | |
2170 | * TASK_TRACED is entered - ignore it. | |
d79fdd6d | 2171 | */ |
a8f072c1 | 2172 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
ceb6bd67 | 2173 | gstop_done = task_participate_group_stop(current); |
d79fdd6d | 2174 | |
fb1d910c | 2175 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
73ddff2b | 2176 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
fb1d910c TH |
2177 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
2178 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | |
73ddff2b | 2179 | |
81be24b8 | 2180 | /* entering a trap, clear TRAPPING */ |
a8f072c1 | 2181 | task_clear_jobctl_trapping(current); |
d79fdd6d | 2182 | |
1da177e4 LT |
2183 | spin_unlock_irq(¤t->sighand->siglock); |
2184 | read_lock(&tasklist_lock); | |
3d749b9e | 2185 | if (may_ptrace_stop()) { |
ceb6bd67 TH |
2186 | /* |
2187 | * Notify parents of the stop. | |
2188 | * | |
2189 | * While ptraced, there are two parents - the ptracer and | |
2190 | * the real_parent of the group_leader. The ptracer should | |
2191 | * know about every stop while the real parent is only | |
2192 | * interested in the completion of group stop. The states | |
2193 | * for the two don't interact with each other. Notify | |
2194 | * separately unless they're gonna be duplicates. | |
2195 | */ | |
2196 | do_notify_parent_cldstop(current, true, why); | |
bb3696da | 2197 | if (gstop_done && ptrace_reparented(current)) |
ceb6bd67 TH |
2198 | do_notify_parent_cldstop(current, false, why); |
2199 | ||
53da1d94 MS |
2200 | /* |
2201 | * Don't want to allow preemption here, because | |
2202 | * sys_ptrace() needs this task to be inactive. | |
2203 | * | |
2204 | * XXX: implement read_unlock_no_resched(). | |
2205 | */ | |
2206 | preempt_disable(); | |
1da177e4 | 2207 | read_unlock(&tasklist_lock); |
53da1d94 | 2208 | preempt_enable_no_resched(); |
76f969e8 | 2209 | cgroup_enter_frozen(); |
5d8f72b5 | 2210 | freezable_schedule(); |
05b28926 | 2211 | cgroup_leave_frozen(true); |
1da177e4 LT |
2212 | } else { |
2213 | /* | |
2214 | * By the time we got the lock, our tracer went away. | |
6405f7f4 | 2215 | * Don't drop the lock yet, another tracer may come. |
ceb6bd67 TH |
2216 | * |
2217 | * If @gstop_done, the ptracer went away between group stop | |
2218 | * completion and here. During detach, it would have set | |
a8f072c1 TH |
2219 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
2220 | * TASK_STOPPED in do_signal_stop() on return, so notifying | |
2221 | * the real parent of the group stop completion is enough. | |
1da177e4 | 2222 | */ |
ceb6bd67 TH |
2223 | if (gstop_done) |
2224 | do_notify_parent_cldstop(current, false, why); | |
2225 | ||
9899d11f | 2226 | /* tasklist protects us from ptrace_freeze_traced() */ |
6405f7f4 | 2227 | __set_current_state(TASK_RUNNING); |
20686a30 ON |
2228 | if (clear_code) |
2229 | current->exit_code = 0; | |
6405f7f4 | 2230 | read_unlock(&tasklist_lock); |
1da177e4 LT |
2231 | } |
2232 | ||
2233 | /* | |
2234 | * We are back. Now reacquire the siglock before touching | |
2235 | * last_siginfo, so that we are sure to have synchronized with | |
2236 | * any signal-sending on another CPU that wants to examine it. | |
2237 | */ | |
2238 | spin_lock_irq(¤t->sighand->siglock); | |
2239 | current->last_siginfo = NULL; | |
2240 | ||
544b2c91 TH |
2241 | /* LISTENING can be set only during STOP traps, clear it */ |
2242 | current->jobctl &= ~JOBCTL_LISTENING; | |
2243 | ||
1da177e4 LT |
2244 | /* |
2245 | * Queued signals ignored us while we were stopped for tracing. | |
2246 | * So check for any that we should take before resuming user mode. | |
b74d0deb | 2247 | * This sets TIF_SIGPENDING, but never clears it. |
1da177e4 | 2248 | */ |
b74d0deb | 2249 | recalc_sigpending_tsk(current); |
1da177e4 LT |
2250 | } |
2251 | ||
3544d72a | 2252 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1da177e4 | 2253 | { |
ae7795bc | 2254 | kernel_siginfo_t info; |
1da177e4 | 2255 | |
faf1f22b | 2256 | clear_siginfo(&info); |
3544d72a | 2257 | info.si_signo = signr; |
1da177e4 | 2258 | info.si_code = exit_code; |
b488893a | 2259 | info.si_pid = task_pid_vnr(current); |
078de5f7 | 2260 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4 LT |
2261 | |
2262 | /* Let the debugger run. */ | |
3544d72a TH |
2263 | ptrace_stop(exit_code, why, 1, &info); |
2264 | } | |
2265 | ||
2266 | void ptrace_notify(int exit_code) | |
2267 | { | |
2268 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
f784e8a7 ON |
2269 | if (unlikely(current->task_works)) |
2270 | task_work_run(); | |
3544d72a | 2271 | |
1da177e4 | 2272 | spin_lock_irq(¤t->sighand->siglock); |
3544d72a | 2273 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1da177e4 LT |
2274 | spin_unlock_irq(¤t->sighand->siglock); |
2275 | } | |
2276 | ||
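/*
 * Illustrative sketch, not part of signal.c: ptrace_notify() is how other
 * kernel code reports ptrace events to the tracer.  A tracee reporting
 * PTRACE_EVENT_EXEC, for instance, packs the event number above SIGTRAP,
 * matching the BUG_ON() above.  The surrounding function is hypothetical.
 */
static void example_report_exec_event(void)
{
	/* hypothetical call site, for illustration only */
	if (current->ptrace & PT_TRACE_EXEC)
		ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
}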
73ddff2b TH |
2277 | /** |
2278 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | |
2279 | * @signr: signr causing group stop if initiating | |
2280 | * | |
2281 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | |
2282 | * and participate in it. If already set, participate in the existing | |
2283 | * group stop. If participated in a group stop (and thus slept), %true is | |
2284 | * returned with siglock released. | |
2285 | * | |
2286 | * If ptraced, this function doesn't handle stop itself. Instead, | |
2287 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | |
2288 | * untouched. The caller must ensure that INTERRUPT trap handling takes | |
2289 | * place afterwards. | |
2290 | * | |
2291 | * CONTEXT: | |
2292 | * Must be called with @current->sighand->siglock held, which is released | |
2293 | * on %true return. | |
2294 | * | |
2295 | * RETURNS: | |
2296 | * %false if group stop is already cancelled or ptrace trap is scheduled. | |
2297 | * %true if participated in group stop. | |
1da177e4 | 2298 | */ |
73ddff2b TH |
2299 | static bool do_signal_stop(int signr) |
2300 | __releases(¤t->sighand->siglock) | |
1da177e4 LT |
2301 | { |
2302 | struct signal_struct *sig = current->signal; | |
1da177e4 | 2303 | |
a8f072c1 | 2304 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
b76808e6 | 2305 | unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
f558b7e4 ON |
2306 | struct task_struct *t; |
2307 | ||
a8f072c1 TH |
2308 | /* signr will be recorded in task->jobctl for retries */ |
2309 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | |
d79fdd6d | 2310 | |
a8f072c1 | 2311 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
573cf9ad | 2312 | unlikely(signal_group_exit(sig))) |
73ddff2b | 2313 | return false; |
1da177e4 | 2314 | /* |
408a37de TH |
2315 | * There is no group stop already in progress. We must |
2316 | * initiate one now. | |
2317 | * | |
2318 | * While ptraced, a task may be resumed while group stop is | |
2319 | * still in effect and then receive a stop signal and | |
2320 | * initiate another group stop. This deviates from the | |
2321 | * usual behavior as two consecutive stop signals can't | |
780006ea ON |
2322 | * cause two group stops when !ptraced. That is why we |
2323 | * also check !task_is_stopped(t) below. | |
408a37de TH |
2324 | * |
2325 | * The condition can be distinguished by testing whether | |
2326 | * SIGNAL_STOP_STOPPED is already set. Don't generate | |
2327 | * group_exit_code in such case. | |
2328 | * | |
2329 | * This is not necessary for SIGNAL_STOP_CONTINUED because | |
2330 | * an intervening stop signal is required to cause two | |
2331 | * continued events regardless of ptrace. | |
1da177e4 | 2332 | */ |
408a37de TH |
2333 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2334 | sig->group_exit_code = signr; | |
1da177e4 | 2335 | |
7dd3db54 TH |
2336 | sig->group_stop_count = 0; |
2337 | ||
2338 | if (task_set_jobctl_pending(current, signr | gstop)) | |
2339 | sig->group_stop_count++; | |
1da177e4 | 2340 | |
8d38f203 ON |
2341 | t = current; |
2342 | while_each_thread(current, t) { | |
1da177e4 | 2343 | /* |
a122b341 ON |
2344 | * Setting state to TASK_STOPPED for a group |
2345 | * stop is always done with the siglock held, | |
2346 | * so this check has no races. | |
1da177e4 | 2347 | */ |
7dd3db54 TH |
2348 | if (!task_is_stopped(t) && |
2349 | task_set_jobctl_pending(t, signr | gstop)) { | |
ae6d2ed7 | 2350 | sig->group_stop_count++; |
fb1d910c TH |
2351 | if (likely(!(t->ptrace & PT_SEIZED))) |
2352 | signal_wake_up(t, 0); | |
2353 | else | |
2354 | ptrace_trap_notify(t); | |
a122b341 | 2355 | } |
d79fdd6d | 2356 | } |
1da177e4 | 2357 | } |
73ddff2b | 2358 | |
d21142ec | 2359 | if (likely(!current->ptrace)) { |
5224fa36 | 2360 | int notify = 0; |
1da177e4 | 2361 | |
5224fa36 TH |
2362 | /* |
2363 | * If there are no other threads in the group, or if there | |
2364 | * is a group stop in progress and we are the last to stop, | |
2365 | * report to the parent. | |
2366 | */ | |
2367 | if (task_participate_group_stop(current)) | |
2368 | notify = CLD_STOPPED; | |
2369 | ||
b5bf9a90 | 2370 | set_special_state(TASK_STOPPED); |
5224fa36 TH |
2371 | spin_unlock_irq(¤t->sighand->siglock); |
2372 | ||
62bcf9d9 TH |
2373 | /* |
2374 | * Notify the parent of the group stop completion. Because | |
2375 | * we're not holding either the siglock or tasklist_lock | |
2376 | * here, ptracer may attach in between; however, this is for | |
2377 | * group stop and should always be delivered to the real | |
2378 | * parent of the group leader. The new ptracer will get | |
2379 | * its notification when this task transitions into | |
2380 | * TASK_TRACED. | |
2381 | */ | |
5224fa36 TH |
2382 | if (notify) { |
2383 | read_lock(&tasklist_lock); | |
62bcf9d9 | 2384 | do_notify_parent_cldstop(current, false, notify); |
5224fa36 TH |
2385 | read_unlock(&tasklist_lock); |
2386 | } | |
2387 | ||
2388 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
76f969e8 | 2389 | cgroup_enter_frozen(); |
5d8f72b5 | 2390 | freezable_schedule(); |
73ddff2b | 2391 | return true; |
d79fdd6d | 2392 | } else { |
73ddff2b TH |
2393 | /* |
2394 | * While ptraced, group stop is handled by STOP trap. | |
2395 | * Schedule it and let the caller deal with it. | |
2396 | */ | |
2397 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | |
2398 | return false; | |
ae6d2ed7 | 2399 | } |
73ddff2b | 2400 | } |
1da177e4 | 2401 | |
73ddff2b TH |
2402 | /** |
2403 | * do_jobctl_trap - take care of ptrace jobctl traps | |
2404 | * | |
3544d72a TH |
2405 | * When PT_SEIZED, it's used for both group stop and explicit |
2406 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with | |
2407 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain | |
2408 | * the stop signal; otherwise, %SIGTRAP. | |
2409 | * | |
2410 | * When !PT_SEIZED, it's used only for group stop trap with stop signal | |
2411 | * number as exit_code and no siginfo. | |
73ddff2b TH |
2412 | * |
2413 | * CONTEXT: | |
2414 | * Must be called with @current->sighand->siglock held, which may be | |
2415 | * released and re-acquired before returning with intervening sleep. | |
2416 | */ | |
2417 | static void do_jobctl_trap(void) | |
2418 | { | |
3544d72a | 2419 | struct signal_struct *signal = current->signal; |
73ddff2b | 2420 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
ae6d2ed7 | 2421 | |
3544d72a TH |
2422 | if (current->ptrace & PT_SEIZED) { |
2423 | if (!signal->group_stop_count && | |
2424 | !(signal->flags & SIGNAL_STOP_STOPPED)) | |
2425 | signr = SIGTRAP; | |
2426 | WARN_ON_ONCE(!signr); | |
2427 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), | |
2428 | CLD_STOPPED); | |
2429 | } else { | |
2430 | WARN_ON_ONCE(!signr); | |
2431 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); | |
2432 | current->exit_code = 0; | |
ae6d2ed7 | 2433 | } |
1da177e4 LT |
2434 | } |
2435 | ||
76f969e8 RG |
2436 | /** |
2437 | * do_freezer_trap - handle the freezer jobctl trap | |
2438 | * | |
2439 | * Puts the task into the frozen state, unless the task is about to quit, | |
2440 | * in which case it just drops JOBCTL_TRAP_FREEZE. | |
2441 | * | |
2442 | * CONTEXT: | |
2443 | * Must be called with @current->sighand->siglock held, | |
2444 | * which is always released before returning. | |
2445 | */ | |
2446 | static void do_freezer_trap(void) | |
2447 | __releases(¤t->sighand->siglock) | |
2448 | { | |
2449 | /* | |
2450 | * If there are trap bits pending other than JOBCTL_TRAP_FREEZE, | |
2451 | * let's make another loop to give them a chance to be handled. | |
2452 | * In any case, we'll return. | |
2453 | */ | |
2454 | if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != | |
2455 | JOBCTL_TRAP_FREEZE) { | |
2456 | spin_unlock_irq(¤t->sighand->siglock); | |
2457 | return; | |
2458 | } | |
2459 | ||
2460 | /* | |
2461 | * Now we're sure that there is no pending fatal signal and no | |
2462 | * pending traps. Clear TIF_SIGPENDING to not get out of schedule() | |
2463 | * immediately (if there is a non-fatal signal pending), and | |
2464 | * put the task to sleep. | |
2465 | */ | |
2466 | __set_current_state(TASK_INTERRUPTIBLE); | |
2467 | clear_thread_flag(TIF_SIGPENDING); | |
2468 | spin_unlock_irq(¤t->sighand->siglock); | |
2469 | cgroup_enter_frozen(); | |
2470 | freezable_schedule(); | |
2471 | } | |
2472 | ||
ae7795bc | 2473 | static int ptrace_signal(int signr, kernel_siginfo_t *info) |
18c98b65 | 2474 | { |
8a352418 ON |
2475 | /* |
2476 | * We do not check sig_kernel_stop(signr) but set this marker | |
2477 | * unconditionally because we do not know whether debugger will | |
2478 | * change signr. This flag has no meaning unless we are going | |
2479 | * to stop after return from ptrace_stop(). In this case it will | |
2480 | * be checked in do_signal_stop(), we should only stop if it was | |
2481 | * not cleared by SIGCONT while we were sleeping. See also the | |
2482 | * comment in dequeue_signal(). | |
2483 | */ | |
2484 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | |
fe1bc6a0 | 2485 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
18c98b65 RM |
2486 | |
2487 | /* We're back. Did the debugger cancel the sig? */ | |
2488 | signr = current->exit_code; | |
2489 | if (signr == 0) | |
2490 | return signr; | |
2491 | ||
2492 | current->exit_code = 0; | |
2493 | ||
5aba085e RD |
2494 | /* |
2495 | * Update the siginfo structure if the signal has | |
2496 | * changed. If the debugger wanted something | |
2497 | * specific in the siginfo structure then it should | |
2498 | * have updated *info via PTRACE_SETSIGINFO. | |
2499 | */ | |
18c98b65 | 2500 | if (signr != info->si_signo) { |
faf1f22b | 2501 | clear_siginfo(info); |
18c98b65 RM |
2502 | info->si_signo = signr; |
2503 | info->si_errno = 0; | |
2504 | info->si_code = SI_USER; | |
6b550f94 | 2505 | rcu_read_lock(); |
18c98b65 | 2506 | info->si_pid = task_pid_vnr(current->parent); |
54ba47ed EB |
2507 | info->si_uid = from_kuid_munged(current_user_ns(), |
2508 | task_uid(current->parent)); | |
6b550f94 | 2509 | rcu_read_unlock(); |
18c98b65 RM |
2510 | } |
2511 | ||
2512 | /* If the (new) signal is now blocked, requeue it. */ | |
2513 | if (sigismember(¤t->blocked, signr)) { | |
b21c5bd5 | 2514 | send_signal(signr, info, current, PIDTYPE_PID); |
18c98b65 RM |
2515 | signr = 0; |
2516 | } | |
2517 | ||
2518 | return signr; | |
2519 | } | |
2520 | ||
20ab7218 | 2521 | bool get_signal(struct ksignal *ksig) |
1da177e4 | 2522 | { |
f6b76d4f ON |
2523 | struct sighand_struct *sighand = current->sighand; |
2524 | struct signal_struct *signal = current->signal; | |
2525 | int signr; | |
1da177e4 | 2526 | |
f784e8a7 ON |
2527 | if (unlikely(current->task_works)) |
2528 | task_work_run(); | |
72667028 | 2529 | |
0326f5a9 | 2530 | if (unlikely(uprobe_deny_signal())) |
20ab7218 | 2531 | return false; |
0326f5a9 | 2532 | |
13b1c3d4 | 2533 | /* |
5d8f72b5 ON |
2534 | * Do this once, we can't return to user-mode if freezing() == T. |
2535 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and | |
2536 | * thus do not need another check after return. | |
13b1c3d4 | 2537 | */ |
fc558a74 RW |
2538 | try_to_freeze(); |
2539 | ||
5d8f72b5 | 2540 | relock: |
f6b76d4f | 2541 | spin_lock_irq(&sighand->siglock); |
021e1ae3 ON |
2542 | /* |
2543 | * Every stopped thread goes here after wakeup. Check to see if | |
2544 | * we should notify the parent, prepare_signal(SIGCONT) encodes | |
2545 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | |
2546 | */ | |
f6b76d4f | 2547 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
c672af35 TH |
2548 | int why; |
2549 | ||
2550 | if (signal->flags & SIGNAL_CLD_CONTINUED) | |
2551 | why = CLD_CONTINUED; | |
2552 | else | |
2553 | why = CLD_STOPPED; | |
2554 | ||
f6b76d4f | 2555 | signal->flags &= ~SIGNAL_CLD_MASK; |
e4420551 | 2556 | |
ae6d2ed7 | 2557 | spin_unlock_irq(&sighand->siglock); |
fa00b80b | 2558 | |
ceb6bd67 TH |
2559 | /* |
2560 | * Notify the parent that we're continuing. This event is | |
2561 | * always per-process and doesn't make a whole lot of sense | |
2562 | * for ptracers, who shouldn't consume the state via | |
2563 | * wait(2) either, but, for backward compatibility, notify | |
2564 | * the ptracer of the group leader too unless it's gonna be | |
2565 | * a duplicate. | |
2566 | */ | |
edf2ed15 | 2567 | read_lock(&tasklist_lock); |
ceb6bd67 TH |
2568 | do_notify_parent_cldstop(current, false, why); |
2569 | ||
bb3696da ON |
2570 | if (ptrace_reparented(current->group_leader)) |
2571 | do_notify_parent_cldstop(current->group_leader, | |
2572 | true, why); | |
edf2ed15 | 2573 | read_unlock(&tasklist_lock); |
ceb6bd67 | 2574 | |
e4420551 ON |
2575 | goto relock; |
2576 | } | |
2577 | ||
35634ffa | 2578 | /* Has this task already been marked for death? */ |
cf43a757 EB |
2579 | if (signal_group_exit(signal)) { |
2580 | ksig->info.si_signo = signr = SIGKILL; | |
2581 | sigdelset(¤t->pending.signal, SIGKILL); | |
98af37d6 ZW |
2582 | trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, |
2583 | &sighand->action[SIGKILL - 1]); | |
cf43a757 | 2584 | recalc_sigpending(); |
35634ffa | 2585 | goto fatal; |
cf43a757 | 2586 | } |
35634ffa | 2587 | |
1da177e4 LT |
2588 | for (;;) { |
2589 | struct k_sigaction *ka; | |
1be53963 | 2590 | |
dd1d6772 TH |
2591 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2592 | do_signal_stop(0)) | |
7bcf6a2c | 2593 | goto relock; |
1be53963 | 2594 | |
76f969e8 RG |
2595 | if (unlikely(current->jobctl & |
2596 | (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { | |
2597 | if (current->jobctl & JOBCTL_TRAP_MASK) { | |
2598 | do_jobctl_trap(); | |
2599 | spin_unlock_irq(&sighand->siglock); | |
2600 | } else if (current->jobctl & JOBCTL_TRAP_FREEZE) | |
2601 | do_freezer_trap(); | |
2602 | ||
2603 | goto relock; | |
2604 | } | |
2605 | ||
2606 | /* | |
2607 | * If the task is leaving the frozen state, let's update | |
2608 | * cgroup counters and reset the frozen bit. | |
2609 | */ | |
2610 | if (unlikely(cgroup_task_frozen(current))) { | |
73ddff2b | 2611 | spin_unlock_irq(&sighand->siglock); |
cb2c4cd8 | 2612 | cgroup_leave_frozen(false); |
73ddff2b TH |
2613 | goto relock; |
2614 | } | |
1da177e4 | 2615 | |
7146db33 EB |
2616 | /* |
2617 | * Signals generated by the execution of an instruction | |
2618 | * need to be delivered before any other pending signals | |
2619 | * so that the instruction pointer in the signal stack | |
2620 | * frame points to the faulting instruction. | |
2621 | */ | |
2622 | signr = dequeue_synchronous_signal(&ksig->info); | |
2623 | if (!signr) | |
2624 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | |
7bcf6a2c | 2625 | |
dd1d6772 TH |
2626 | if (!signr) |
2627 | break; /* will return 0 */ | |
7bcf6a2c | 2628 | |
8a352418 | 2629 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
828b1f65 | 2630 | signr = ptrace_signal(signr, &ksig->info); |
dd1d6772 TH |
2631 | if (!signr) |
2632 | continue; | |
1da177e4 LT |
2633 | } |
2634 | ||
dd1d6772 TH |
2635 | ka = &sighand->action[signr-1]; |
2636 | ||
f9d4257e | 2637 | /* Trace actually delivered signals. */ |
828b1f65 | 2638 | trace_signal_deliver(signr, &ksig->info, ka); |
f9d4257e | 2639 | |
1da177e4 LT |
2640 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2641 | continue; | |
2642 | if (ka->sa.sa_handler != SIG_DFL) { | |
2643 | /* Run the handler. */ | |
828b1f65 | 2644 | ksig->ka = *ka; |
1da177e4 LT |
2645 | |
2646 | if (ka->sa.sa_flags & SA_ONESHOT) | |
2647 | ka->sa.sa_handler = SIG_DFL; | |
2648 | ||
2649 | break; /* will return non-zero "signr" value */ | |
2650 | } | |
2651 | ||
2652 | /* | |
2653 | * Now we are doing the default action for this signal. | |
2654 | */ | |
2655 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
2656 | continue; | |
2657 | ||
84d73786 | 2658 | /* |
0fbc26a6 | 2659 | * Global init gets no signals it doesn't want. |
b3bfa0cb SB |
2660 | * Container-init gets no signals it doesn't want from the same | |
2661 | * container. | |
2662 | * | |
2663 | * Note that if global/container-init sees a sig_kernel_only() | |
2664 | * signal here, the signal must have been generated internally | |
2665 | * or must have come from an ancestor namespace. In either | |
2666 | * case, the signal cannot be dropped. | |
84d73786 | 2667 | */ |
fae5fa44 | 2668 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
b3bfa0cb | 2669 | !sig_kernel_only(signr)) |
1da177e4 LT |
2670 | continue; |
2671 | ||
2672 | if (sig_kernel_stop(signr)) { | |
2673 | /* | |
2674 | * The default action is to stop all threads in | |
2675 | * the thread group. The job control signals | |
2676 | * do nothing in an orphaned pgrp, but SIGSTOP | |
2677 | * always works. Note that siglock needs to be | |
2678 | * dropped during the call to is_current_pgrp_orphaned() | |
2679 | * because of lock ordering with tasklist_lock. | |
2680 | * This allows an intervening SIGCONT to be posted. | |
2681 | * We need to check for that and bail out if necessary. | |
2682 | */ | |
2683 | if (signr != SIGSTOP) { | |
f6b76d4f | 2684 | spin_unlock_irq(&sighand->siglock); |
1da177e4 LT |
2685 | |
2686 | /* signals can be posted during this window */ | |
2687 | ||
3e7cd6c4 | 2688 | if (is_current_pgrp_orphaned()) |
1da177e4 LT |
2689 | goto relock; |
2690 | ||
f6b76d4f | 2691 | spin_lock_irq(&sighand->siglock); |
1da177e4 LT |
2692 | } |
2693 | ||
828b1f65 | 2694 | if (likely(do_signal_stop(ksig->info.si_signo))) { |
1da177e4 LT |
2695 | /* It released the siglock. */ |
2696 | goto relock; | |
2697 | } | |
2698 | ||
2699 | /* | |
2700 | * We didn't actually stop, due to a race | |
2701 | * with SIGCONT or something like that. | |
2702 | */ | |
2703 | continue; | |
2704 | } | |
2705 | ||
35634ffa | 2706 | fatal: |
f6b76d4f | 2707 | spin_unlock_irq(&sighand->siglock); |
f2b31bb5 RG |
2708 | if (unlikely(cgroup_task_frozen(current))) |
2709 | cgroup_leave_frozen(true); | |
1da177e4 LT |
2710 | |
2711 | /* | |
2712 | * Anything else is fatal, maybe with a core dump. | |
2713 | */ | |
2714 | current->flags |= PF_SIGNALED; | |
2dce81bf | 2715 | |
1da177e4 | 2716 | if (sig_kernel_coredump(signr)) { |
2dce81bf | 2717 | if (print_fatal_signals) |
828b1f65 | 2718 | print_fatal_signal(ksig->info.si_signo); |
2b5faa4c | 2719 | proc_coredump_connector(current); |
1da177e4 LT |
2720 | /* |
2721 | * If it was able to dump core, this kills all | |
2722 | * other threads in the group and synchronizes with | |
2723 | * their demise. If we lost the race with another | |
2724 | * thread getting here, it set group_exit_code | |
2725 | * first and our do_group_exit call below will use | |
2726 | * that value and ignore the one we pass it. | |
2727 | */ | |
828b1f65 | 2728 | do_coredump(&ksig->info); |
1da177e4 LT |
2729 | } |
2730 | ||
2731 | /* | |
2732 | * Death signals, no core dump. | |
2733 | */ | |
828b1f65 | 2734 | do_group_exit(ksig->info.si_signo); |
1da177e4 LT |
2735 | /* NOTREACHED */ |
2736 | } | |
f6b76d4f | 2737 | spin_unlock_irq(&sighand->siglock); |
828b1f65 RW |
2738 | |
2739 | ksig->sig = signr; | |
2740 | return ksig->sig > 0; | |
1da177e4 LT |
2741 | } |
2742 | ||
5e6292c0 | 2743 | /** |
efee984c | 2744 | * signal_delivered - update state after successful signal delivery |
10b1c7ac | 2745 | * @ksig: kernel signal struct |
efee984c | 2746 | * @stepping: nonzero if debugger single-step or block-step in use |
5e6292c0 | 2747 | * |
e227867f | 2748 | * This function should be called when a signal has successfully been |
10b1c7ac | 2749 | * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
efee984c | 2750 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
10b1c7ac | 2751 | * is set in @ksig->ka.sa.sa_flags). Tracing is notified. |
5e6292c0 | 2752 | */ |
10b1c7ac | 2753 | static void signal_delivered(struct ksignal *ksig, int stepping) |
5e6292c0 MF |
2754 | { |
2755 | sigset_t blocked; | |
2756 | ||
a610d6e6 AV |
2757 | /* A signal was successfully delivered, and the |
2758 | saved sigmask was stored on the signal frame, | |
2759 | and will be restored by sigreturn. So we can | |
2760 | simply clear the restore sigmask flag. */ | |
2761 | clear_restore_sigmask(); | |
2762 | ||
10b1c7ac RW |
2763 | sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); |
2764 | if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) | |
2765 | sigaddset(&blocked, ksig->sig); | |
5e6292c0 | 2766 | set_current_blocked(&blocked); |
df5601f9 | 2767 | tracehook_signal_handler(stepping); |
5e6292c0 MF |
2768 | } |
2769 | ||
2ce5da17 AV |
2770 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
2771 | { | |
2772 | if (failed) | |
cb44c9a0 | 2773 | force_sigsegv(ksig->sig); |
2ce5da17 | 2774 | else |
10b1c7ac | 2775 | signal_delivered(ksig, stepping); |
2ce5da17 AV |
2776 | } |
2777 | ||
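/*
 * Illustrative sketch, not part of signal.c: the shape of an
 * architecture's signal-delivery loop built on get_signal() and
 * signal_setup_done().  The frame-setup helper, the pt_regs handling and
 * the omitted syscall-restart logic are hypothetical stand-ins for real
 * per-architecture code.
 */

/* hypothetical per-arch frame builder; a real one writes the signal
 * frame to the user stack and returns non-zero on failure */
static int example_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	return 0;	/* stand-in: pretend the frame was written */
}

static void example_arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* an actual handler must run: build the user stack frame */
		int failed = example_setup_rt_frame(&ksig, regs);

		/* on failure this forces SIGSEGV; otherwise it updates the
		 * blocked mask per ksig.ka.sa.sa_mask and SA_NODEFER */
		signal_setup_done(failed, &ksig, 0);
		return;
	}

	/* no handler ran: restore the mask saved by e.g. sigsuspend() */
	restore_saved_sigmask();
}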
0edceb7b ON |
2778 | /* |
2779 | * It could be that complete_signal() picked us to notify about the | |
fec9993d ON |
2780 | * group-wide signal. Other threads should be notified now to take |
2781 | * the shared signals in @which since we will not. | |
0edceb7b | 2782 | */ |
f646e227 | 2783 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
0edceb7b | 2784 | { |
f646e227 | 2785 | sigset_t retarget; |
0edceb7b ON |
2786 | struct task_struct *t; |
2787 | ||
f646e227 ON |
2788 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2789 | if (sigisemptyset(&retarget)) | |
2790 | return; | |
2791 | ||
0edceb7b ON |
2792 | t = tsk; |
2793 | while_each_thread(tsk, t) { | |
fec9993d ON |
2794 | if (t->flags & PF_EXITING) |
2795 | continue; | |
2796 | ||
2797 | if (!has_pending_signals(&retarget, &t->blocked)) | |
2798 | continue; | |
2799 | /* Remove the signals this thread can handle. */ | |
2800 | sigandsets(&retarget, &retarget, &t->blocked); | |
2801 | ||
2802 | if (!signal_pending(t)) | |
2803 | signal_wake_up(t, 0); | |
2804 | ||
2805 | if (sigisemptyset(&retarget)) | |
2806 | break; | |
0edceb7b ON |
2807 | } |
2808 | } | |
2809 | ||
d12619b5 ON |
2810 | void exit_signals(struct task_struct *tsk) |
2811 | { | |
2812 | int group_stop = 0; | |
f646e227 | 2813 | sigset_t unblocked; |
d12619b5 | 2814 | |
77e4ef99 TH |
2815 | /* |
2816 | * @tsk is about to have PF_EXITING set - lock out users which | |
2817 | * expect stable threadgroup. | |
2818 | */ | |
780de9dd | 2819 | cgroup_threadgroup_change_begin(tsk); |
77e4ef99 | 2820 | |
5dee1707 ON |
2821 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2822 | tsk->flags |= PF_EXITING; | |
780de9dd | 2823 | cgroup_threadgroup_change_end(tsk); |
5dee1707 | 2824 | return; |
d12619b5 ON |
2825 | } |
2826 | ||
5dee1707 | 2827 | spin_lock_irq(&tsk->sighand->siglock); |
d12619b5 ON |
2828 | /* |
2829 | * From now this task is not visible for group-wide signals, | |
2830 | * see wants_signal(), do_signal_stop(). | |
2831 | */ | |
2832 | tsk->flags |= PF_EXITING; | |
77e4ef99 | 2833 | |
780de9dd | 2834 | cgroup_threadgroup_change_end(tsk); |
77e4ef99 | 2835 | |
5dee1707 ON |
2836 | if (!signal_pending(tsk)) |
2837 | goto out; | |
2838 | ||
f646e227 ON |
2839 | unblocked = tsk->blocked; |
2840 | signotset(&unblocked); | |
2841 | retarget_shared_pending(tsk, &unblocked); | |
5dee1707 | 2842 | |
a8f072c1 | 2843 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
e5c1902e | 2844 | task_participate_group_stop(tsk)) |
edf2ed15 | 2845 | group_stop = CLD_STOPPED; |
5dee1707 | 2846 | out: |
d12619b5 ON |
2847 | spin_unlock_irq(&tsk->sighand->siglock); |
2848 | ||
62bcf9d9 TH |
2849 | /* |
2850 | * If group stop has completed, deliver the notification. This | |
2851 | * should always go to the real parent of the group leader. | |
2852 | */ | |
ae6d2ed7 | 2853 | if (unlikely(group_stop)) { |
d12619b5 | 2854 | read_lock(&tasklist_lock); |
62bcf9d9 | 2855 | do_notify_parent_cldstop(tsk, false, group_stop); |
d12619b5 ON |
2856 | read_unlock(&tasklist_lock); |
2857 | } | |
2858 | } | |
2859 | ||
1da177e4 LT |
2860 | /* |
2861 | * System call entry points. | |
2862 | */ | |
2863 | ||
41c57892 RD |
2864 | /** |
2865 | * sys_restart_syscall - restart a system call | |
2866 | */ | |
754fe8d2 | 2867 | SYSCALL_DEFINE0(restart_syscall) |
1da177e4 | 2868 | { |
f56141e3 | 2869 | struct restart_block *restart = ¤t->restart_block; |
1da177e4 LT |
2870 | return restart->fn(restart); |
2871 | } | |
2872 | ||
2873 | long do_no_restart_syscall(struct restart_block *param) | |
2874 | { | |
2875 | return -EINTR; | |
2876 | } | |
2877 | ||
b182801a ON |
2878 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2879 | { | |
2880 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { | |
2881 | sigset_t newblocked; | |
2882 | /* A set of now blocked but previously unblocked signals. */ | |
702a5073 | 2883 | sigandnsets(&newblocked, newset, ¤t->blocked); |
b182801a ON |
2884 | retarget_shared_pending(tsk, &newblocked); |
2885 | } | |
2886 | tsk->blocked = *newset; | |
2887 | recalc_sigpending(); | |
2888 | } | |
2889 | ||
e6fa16ab ON |
2890 | /** |
2891 | * set_current_blocked - change current->blocked mask | |
2892 | * @newset: new mask | |
2893 | * | |
2894 | * It is wrong to change ->blocked directly; this helper should be used
2895 | * to ensure the process can't miss a shared signal we are going to block. | |
1da177e4 | 2896 | */ |
77097ae5 AV |
2897 | void set_current_blocked(sigset_t *newset) |
2898 | { | |
77097ae5 | 2899 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
0c4a8423 | 2900 | __set_current_blocked(newset); |
77097ae5 AV |
2901 | } |
2902 | ||
2903 | void __set_current_blocked(const sigset_t *newset) | |
e6fa16ab ON |
2904 | { |
2905 | struct task_struct *tsk = current; | |
2906 | ||
c7be96af WL |
2907 | /* |
2908 | * In case the signal mask hasn't changed, there is nothing we need | |
2909 | * to do. The current->blocked should not be modified by another task.
2910 | */ | |
2911 | if (sigequalsets(&tsk->blocked, newset)) | |
2912 | return; | |
2913 | ||
e6fa16ab | 2914 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 2915 | __set_task_blocked(tsk, newset); |
e6fa16ab ON |
2916 | spin_unlock_irq(&tsk->sighand->siglock); |
2917 | } | |
1da177e4 LT |
2918 | |
2919 | /* | |
2920 | * This is also useful for kernel threads that want to temporarily | |
2921 | * (or permanently) block certain signals. | |
2922 | * | |
2923 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
2924 | * interface happily blocks "unblockable" signals like SIGKILL | |
2925 | * and friends. | |
2926 | */ | |
2927 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
2928 | { | |
73ef4aeb ON |
2929 | struct task_struct *tsk = current; |
2930 | sigset_t newset; | |
1da177e4 | 2931 | |
73ef4aeb | 2932 | /* Lockless, only current can change ->blocked, never from irq */ |
a26fd335 | 2933 | if (oldset) |
73ef4aeb | 2934 | *oldset = tsk->blocked; |
a26fd335 | 2935 | |
1da177e4 LT |
2936 | switch (how) { |
2937 | case SIG_BLOCK: | |
73ef4aeb | 2938 | sigorsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
2939 | break; |
2940 | case SIG_UNBLOCK: | |
702a5073 | 2941 | sigandnsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
2942 | break; |
2943 | case SIG_SETMASK: | |
73ef4aeb | 2944 | newset = *set; |
1da177e4 LT |
2945 | break; |
2946 | default: | |
73ef4aeb | 2947 | return -EINVAL; |
1da177e4 | 2948 | } |
a26fd335 | 2949 | |
77097ae5 | 2950 | __set_current_blocked(&newset); |
73ef4aeb | 2951 | return 0; |
1da177e4 | 2952 | } |
fb50f5a4 | 2953 | EXPORT_SYMBOL(sigprocmask); |
1da177e4 | 2954 | |
ded653cc DD |
2955 | /* |
2956 | * This API helps syscalls install an app-provided sigmask.
2957 | * | |
2958 | * This is useful for syscalls such as ppoll, pselect, io_pgetevents and | |
2959 | * epoll_pwait where a new sigmask is passed from userland for the syscalls. | |
b772434b ON |
2960 | * |
2961 | * Note that it does set_restore_sigmask() in advance, so it must be always | |
2962 | * paired with restore_saved_sigmask_unless() before return from syscall. | |
ded653cc | 2963 | */ |
b772434b | 2964 | int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize) |
ded653cc | 2965 | { |
b772434b | 2966 | sigset_t kmask; |
ded653cc | 2967 | |
b772434b ON |
2968 | if (!umask) |
2969 | return 0; | |
ded653cc DD |
2970 | if (sigsetsize != sizeof(sigset_t)) |
2971 | return -EINVAL; | |
b772434b | 2972 | if (copy_from_user(&kmask, umask, sizeof(sigset_t))) |
ded653cc DD |
2973 | return -EFAULT; |
2974 | ||
b772434b ON |
2975 | set_restore_sigmask(); |
2976 | current->saved_sigmask = current->blocked; | |
2977 | set_current_blocked(&kmask); | |
ded653cc DD |
2978 | |
2979 | return 0; | |
2980 | } | |
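/*
 * Illustrative sketch, not part of this file: how a syscall is expected to
 * pair set_user_sigmask() with restore_saved_sigmask_unless(), as the
 * comment above requires.  The syscall name and the wait helper below are
 * hypothetical; only the two sigmask helpers are the real interfaces.
 */
#if 0
SYSCALL_DEFINE3(example_pwait, int, fd, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int ret;

	/* Install the caller-supplied mask, saving the old one. */
	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = example_do_wait(fd);	/* hypothetical interruptible wait */

	/*
	 * If we were interrupted, leave the saved mask in place so it is
	 * restored after any handler runs; otherwise restore it right away.
	 */
	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);
	return ret;
}
#endif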
ded653cc DD |
2981 | |
2982 | #ifdef CONFIG_COMPAT | |
b772434b | 2983 | int set_compat_user_sigmask(const compat_sigset_t __user *umask, |
ded653cc DD |
2984 | size_t sigsetsize) |
2985 | { | |
b772434b | 2986 | sigset_t kmask; |
ded653cc | 2987 | |
b772434b ON |
2988 | if (!umask) |
2989 | return 0; | |
ded653cc DD |
2990 | if (sigsetsize != sizeof(compat_sigset_t)) |
2991 | return -EINVAL; | |
b772434b | 2992 | if (get_compat_sigset(&kmask, umask)) |
ded653cc DD |
2993 | return -EFAULT; |
2994 | ||
b772434b ON |
2995 | set_restore_sigmask(); |
2996 | current->saved_sigmask = current->blocked; | |
2997 | set_current_blocked(&kmask); | |
ded653cc DD |
2998 | |
2999 | return 0; | |
3000 | } | |
ded653cc DD |
3001 | #endif |
3002 | ||
41c57892 RD |
3003 | /** |
3004 | * sys_rt_sigprocmask - change the list of currently blocked signals | |
3005 | * @how: whether to add, remove, or set signals | |
ada9c933 | 3006 | * @nset: new set of blocked signals (if non-null)
41c57892 RD |
3007 | * @oset: previous value of signal mask if non-null |
3008 | * @sigsetsize: size of sigset_t type | |
3009 | */ | |
bb7efee2 | 3010 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
17da2bd9 | 3011 | sigset_t __user *, oset, size_t, sigsetsize) |
1da177e4 | 3012 | { |
1da177e4 | 3013 | sigset_t old_set, new_set; |
bb7efee2 | 3014 | int error; |
1da177e4 LT |
3015 | |
3016 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3017 | if (sigsetsize != sizeof(sigset_t)) | |
bb7efee2 | 3018 | return -EINVAL; |
1da177e4 | 3019 | |
bb7efee2 ON |
3020 | old_set = current->blocked; |
3021 | ||
3022 | if (nset) { | |
3023 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | |
3024 | return -EFAULT; | |
1da177e4 LT |
3025 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
3026 | ||
bb7efee2 | 3027 | error = sigprocmask(how, &new_set, NULL); |
1da177e4 | 3028 | if (error) |
bb7efee2 ON |
3029 | return error; |
3030 | } | |
1da177e4 | 3031 | |
bb7efee2 ON |
3032 | if (oset) { |
3033 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | |
3034 | return -EFAULT; | |
1da177e4 | 3035 | } |
bb7efee2 ON |
3036 | |
3037 | return 0; | |
1da177e4 LT |
3038 | } |
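/*
 * Userspace usage sketch (illustration only, not kernel code): glibc's
 * sigprocmask() is the usual path into rt_sigprocmask above.  This blocks
 * SIGINT around a critical region and then restores the saved mask.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* SIG_BLOCK adds the listed signals to the blocked mask. */
	if (sigprocmask(SIG_BLOCK, &block, &old) != 0)
		return 1;

	puts("SIGINT is blocked here; a Ctrl-C stays pending");

	/* SIG_SETMASK restores exactly the previously saved mask. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
#endif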
3039 | ||
322a56cb | 3040 | #ifdef CONFIG_COMPAT |
322a56cb AV |
3041 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, |
3042 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) | |
1da177e4 | 3043 | { |
322a56cb AV |
3044 | sigset_t old_set = current->blocked; |
3045 | ||
3046 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3047 | if (sigsetsize != sizeof(sigset_t)) | |
3048 | return -EINVAL; | |
3049 | ||
3050 | if (nset) { | |
322a56cb AV |
3051 | sigset_t new_set; |
3052 | int error; | |
3968cf62 | 3053 | if (get_compat_sigset(&new_set, nset)) |
322a56cb | 3054 | return -EFAULT; |
322a56cb AV |
3055 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
3056 | ||
3057 | error = sigprocmask(how, &new_set, NULL); | |
3058 | if (error) | |
3059 | return error; | |
3060 | } | |
f454322e | 3061 | return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; |
322a56cb AV |
3062 | } |
3063 | #endif | |
1da177e4 | 3064 | |
b1d294c8 | 3065 | static void do_sigpending(sigset_t *set) |
1da177e4 | 3066 | { |
1da177e4 | 3067 | spin_lock_irq(¤t->sighand->siglock); |
fe9c1db2 | 3068 | sigorsets(set, ¤t->pending.signal, |
1da177e4 LT |
3069 | ¤t->signal->shared_pending.signal); |
3070 | spin_unlock_irq(¤t->sighand->siglock); | |
3071 | ||
3072 | /* Outside the lock because only this thread touches it. */ | |
fe9c1db2 | 3073 | sigandsets(set, ¤t->blocked, set); |
5aba085e | 3074 | } |
1da177e4 | 3075 | |
41c57892 RD |
3076 | /** |
3077 | * sys_rt_sigpending - examine pending signals that have been raised
3078 | * while blocked | |
20f22ab4 | 3079 | * @uset: where the set of pending signals is returned
41c57892 RD |
3080 | * @sigsetsize: size of sigset_t type or larger |
3081 | */ | |
fe9c1db2 | 3082 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
1da177e4 | 3083 | { |
fe9c1db2 | 3084 | sigset_t set; |
176826af DL |
3085 | |
3086 | if (sigsetsize > sizeof(*uset)) | |
3087 | return -EINVAL; | |
3088 | ||
b1d294c8 CB |
3089 | do_sigpending(&set); |
3090 | ||
3091 | if (copy_to_user(uset, &set, sigsetsize)) | |
3092 | return -EFAULT; | |
3093 | ||
3094 | return 0; | |
fe9c1db2 AV |
3095 | } |
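/*
 * Userspace usage sketch (illustration only): sigpending() reports signals
 * raised while blocked, which is what rt_sigpending above computes from
 * the private and shared pending sets.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* stays pending: it is blocked */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
#endif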
3096 | ||
3097 | #ifdef CONFIG_COMPAT | |
fe9c1db2 AV |
3098 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
3099 | compat_size_t, sigsetsize) | |
1da177e4 | 3100 | { |
fe9c1db2 | 3101 | sigset_t set; |
176826af DL |
3102 | |
3103 | if (sigsetsize > sizeof(*uset)) | |
3104 | return -EINVAL; | |
3105 | ||
b1d294c8 CB |
3106 | do_sigpending(&set); |
3107 | ||
3108 | return put_compat_sigset(uset, &set, sigsetsize); | |
1da177e4 | 3109 | } |
fe9c1db2 | 3110 | #endif |
1da177e4 | 3111 | |
4ce5f9c9 EB |
3112 | static const struct { |
3113 | unsigned char limit, layout; | |
3114 | } sig_sicodes[] = { | |
3115 | [SIGILL] = { NSIGILL, SIL_FAULT }, | |
3116 | [SIGFPE] = { NSIGFPE, SIL_FAULT }, | |
3117 | [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, | |
3118 | [SIGBUS] = { NSIGBUS, SIL_FAULT }, | |
3119 | [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, | |
3120 | #if defined(SIGEMT) | |
3121 | [SIGEMT] = { NSIGEMT, SIL_FAULT }, | |
3122 | #endif | |
3123 | [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, | |
3124 | [SIGPOLL] = { NSIGPOLL, SIL_POLL }, | |
3125 | [SIGSYS] = { NSIGSYS, SIL_SYS }, | |
3126 | }; | |
3127 | ||
b2a2ab52 | 3128 | static bool known_siginfo_layout(unsigned sig, int si_code) |
4ce5f9c9 EB |
3129 | { |
3130 | if (si_code == SI_KERNEL) | |
3131 | return true; | |
3132 | else if ((si_code > SI_USER)) { | |
3133 | if (sig_specific_sicodes(sig)) { | |
3134 | if (si_code <= sig_sicodes[sig].limit) | |
3135 | return true; | |
3136 | } | |
3137 | else if (si_code <= NSIGPOLL) | |
3138 | return true; | |
3139 | } | |
3140 | else if (si_code >= SI_DETHREAD) | |
3141 | return true; | |
3142 | else if (si_code == SI_ASYNCNL) | |
3143 | return true; | |
3144 | return false; | |
3145 | } | |
3146 | ||
a3670058 | 3147 | enum siginfo_layout siginfo_layout(unsigned sig, int si_code) |
cc731525 EB |
3148 | { |
3149 | enum siginfo_layout layout = SIL_KILL; | |
3150 | if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { | |
4ce5f9c9 EB |
3151 | if ((sig < ARRAY_SIZE(sig_sicodes)) && |
3152 | (si_code <= sig_sicodes[sig].limit)) { | |
3153 | layout = sig_sicodes[sig].layout; | |
31931c93 EB |
3154 | /* Handle the exceptions */ |
3155 | if ((sig == SIGBUS) && | |
3156 | (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) | |
3157 | layout = SIL_FAULT_MCEERR; | |
3158 | else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) | |
3159 | layout = SIL_FAULT_BNDERR; | |
3160 | #ifdef SEGV_PKUERR | |
3161 | else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) | |
3162 | layout = SIL_FAULT_PKUERR; | |
3163 | #endif | |
3164 | } | |
cc731525 EB |
3165 | else if (si_code <= NSIGPOLL) |
3166 | layout = SIL_POLL; | |
3167 | } else { | |
3168 | if (si_code == SI_TIMER) | |
3169 | layout = SIL_TIMER; | |
3170 | else if (si_code == SI_SIGIO) | |
3171 | layout = SIL_POLL; | |
3172 | else if (si_code < 0) | |
3173 | layout = SIL_RT; | |
cc731525 EB |
3174 | } |
3175 | return layout; | |
3176 | } | |
3177 | ||
4ce5f9c9 EB |
3178 | static inline char __user *si_expansion(const siginfo_t __user *info) |
3179 | { | |
3180 | return ((char __user *)info) + sizeof(struct kernel_siginfo); | |
3181 | } | |
3182 | ||
ae7795bc | 3183 | int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) |
1da177e4 | 3184 | { |
4ce5f9c9 | 3185 | char __user *expansion = si_expansion(to); |
ae7795bc | 3186 | if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) |
1da177e4 | 3187 | return -EFAULT; |
4ce5f9c9 | 3188 | if (clear_user(expansion, SI_EXPANSION_SIZE)) |
1da177e4 | 3189 | return -EFAULT; |
c999b933 | 3190 | return 0; |
1da177e4 LT |
3191 | } |
3192 | ||
601d5abf EB |
3193 | static int post_copy_siginfo_from_user(kernel_siginfo_t *info, |
3194 | const siginfo_t __user *from) | |
4cd2e0e7 | 3195 | { |
601d5abf | 3196 | if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { |
4ce5f9c9 EB |
3197 | char __user *expansion = si_expansion(from); |
3198 | char buf[SI_EXPANSION_SIZE]; | |
3199 | int i; | |
3200 | /* | |
3201 | * An unknown si_code might need more than | |
3202 | * sizeof(struct kernel_siginfo) bytes. Verify all of the | |
3203 | * extra bytes are 0. This guarantees copy_siginfo_to_user | |
3204 | * will return this data to userspace exactly. | |
3205 | */ | |
3206 | if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) | |
3207 | return -EFAULT; | |
3208 | for (i = 0; i < SI_EXPANSION_SIZE; i++) { | |
3209 | if (buf[i] != 0) | |
3210 | return -E2BIG; | |
3211 | } | |
3212 | } | |
4cd2e0e7 EB |
3213 | return 0; |
3214 | } | |
3215 | ||
601d5abf EB |
3216 | static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, |
3217 | const siginfo_t __user *from) | |
3218 | { | |
3219 | if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) | |
3220 | return -EFAULT; | |
3221 | to->si_signo = signo; | |
3222 | return post_copy_siginfo_from_user(to, from); | |
3223 | } | |
3224 | ||
3225 | int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) | |
3226 | { | |
3227 | if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) | |
3228 | return -EFAULT; | |
3229 | return post_copy_siginfo_from_user(to, from); | |
3230 | } | |
3231 | ||
212a36a1 | 3232 | #ifdef CONFIG_COMPAT |
ea64d5ac | 3233 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, |
ae7795bc | 3234 | const struct kernel_siginfo *from) |
ea64d5ac EB |
3235 | #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) |
3236 | { | |
3237 | return __copy_siginfo_to_user32(to, from, in_x32_syscall()); | |
3238 | } | |
3239 | int __copy_siginfo_to_user32(struct compat_siginfo __user *to, | |
ae7795bc | 3240 | const struct kernel_siginfo *from, bool x32_ABI) |
ea64d5ac EB |
3241 | #endif |
3242 | { | |
3243 | struct compat_siginfo new; | |
3244 | memset(&new, 0, sizeof(new)); | |
3245 | ||
3246 | new.si_signo = from->si_signo; | |
3247 | new.si_errno = from->si_errno; | |
3248 | new.si_code = from->si_code; | |
3249 | switch(siginfo_layout(from->si_signo, from->si_code)) { | |
3250 | case SIL_KILL: | |
3251 | new.si_pid = from->si_pid; | |
3252 | new.si_uid = from->si_uid; | |
3253 | break; | |
3254 | case SIL_TIMER: | |
3255 | new.si_tid = from->si_tid; | |
3256 | new.si_overrun = from->si_overrun; | |
3257 | new.si_int = from->si_int; | |
3258 | break; | |
3259 | case SIL_POLL: | |
3260 | new.si_band = from->si_band; | |
3261 | new.si_fd = from->si_fd; | |
3262 | break; | |
3263 | case SIL_FAULT: | |
3264 | new.si_addr = ptr_to_compat(from->si_addr); | |
3265 | #ifdef __ARCH_SI_TRAPNO | |
3266 | new.si_trapno = from->si_trapno; | |
3267 | #endif | |
31931c93 EB |
3268 | break; |
3269 | case SIL_FAULT_MCEERR: | |
3270 | new.si_addr = ptr_to_compat(from->si_addr); | |
3271 | #ifdef __ARCH_SI_TRAPNO | |
3272 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 3273 | #endif |
31931c93 EB |
3274 | new.si_addr_lsb = from->si_addr_lsb; |
3275 | break; | |
3276 | case SIL_FAULT_BNDERR: | |
3277 | new.si_addr = ptr_to_compat(from->si_addr); | |
3278 | #ifdef __ARCH_SI_TRAPNO | |
3279 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 3280 | #endif |
31931c93 EB |
3281 | new.si_lower = ptr_to_compat(from->si_lower); |
3282 | new.si_upper = ptr_to_compat(from->si_upper); | |
3283 | break; | |
3284 | case SIL_FAULT_PKUERR: | |
3285 | new.si_addr = ptr_to_compat(from->si_addr); | |
3286 | #ifdef __ARCH_SI_TRAPNO | |
3287 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 3288 | #endif |
31931c93 | 3289 | new.si_pkey = from->si_pkey; |
ea64d5ac EB |
3290 | break; |
3291 | case SIL_CHLD: | |
3292 | new.si_pid = from->si_pid; | |
3293 | new.si_uid = from->si_uid; | |
3294 | new.si_status = from->si_status; | |
3295 | #ifdef CONFIG_X86_X32_ABI | |
3296 | if (x32_ABI) { | |
3297 | new._sifields._sigchld_x32._utime = from->si_utime; | |
3298 | new._sifields._sigchld_x32._stime = from->si_stime; | |
3299 | } else | |
3300 | #endif | |
3301 | { | |
3302 | new.si_utime = from->si_utime; | |
3303 | new.si_stime = from->si_stime; | |
3304 | } | |
3305 | break; | |
3306 | case SIL_RT: | |
3307 | new.si_pid = from->si_pid; | |
3308 | new.si_uid = from->si_uid; | |
3309 | new.si_int = from->si_int; | |
3310 | break; | |
3311 | case SIL_SYS: | |
3312 | new.si_call_addr = ptr_to_compat(from->si_call_addr); | |
3313 | new.si_syscall = from->si_syscall; | |
3314 | new.si_arch = from->si_arch; | |
3315 | break; | |
3316 | } | |
3317 | ||
3318 | if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) | |
3319 | return -EFAULT; | |
3320 | ||
3321 | return 0; | |
3322 | } | |
3323 | ||
601d5abf EB |
3324 | static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, |
3325 | const struct compat_siginfo *from) | |
212a36a1 | 3326 | { |
212a36a1 | 3327 | clear_siginfo(to); |
601d5abf EB |
3328 | to->si_signo = from->si_signo; |
3329 | to->si_errno = from->si_errno; | |
3330 | to->si_code = from->si_code; | |
3331 | switch(siginfo_layout(from->si_signo, from->si_code)) { | |
212a36a1 | 3332 | case SIL_KILL: |
601d5abf EB |
3333 | to->si_pid = from->si_pid; |
3334 | to->si_uid = from->si_uid; | |
212a36a1 EB |
3335 | break; |
3336 | case SIL_TIMER: | |
601d5abf EB |
3337 | to->si_tid = from->si_tid; |
3338 | to->si_overrun = from->si_overrun; | |
3339 | to->si_int = from->si_int; | |
212a36a1 EB |
3340 | break; |
3341 | case SIL_POLL: | |
601d5abf EB |
3342 | to->si_band = from->si_band; |
3343 | to->si_fd = from->si_fd; | |
212a36a1 EB |
3344 | break; |
3345 | case SIL_FAULT: | |
601d5abf | 3346 | to->si_addr = compat_ptr(from->si_addr); |
212a36a1 | 3347 | #ifdef __ARCH_SI_TRAPNO |
601d5abf | 3348 | to->si_trapno = from->si_trapno; |
212a36a1 | 3349 | #endif |
31931c93 EB |
3350 | break; |
3351 | case SIL_FAULT_MCEERR: | |
601d5abf | 3352 | to->si_addr = compat_ptr(from->si_addr); |
31931c93 | 3353 | #ifdef __ARCH_SI_TRAPNO |
601d5abf | 3354 | to->si_trapno = from->si_trapno; |
212a36a1 | 3355 | #endif |
601d5abf | 3356 | to->si_addr_lsb = from->si_addr_lsb; |
31931c93 EB |
3357 | break; |
3358 | case SIL_FAULT_BNDERR: | |
601d5abf | 3359 | to->si_addr = compat_ptr(from->si_addr); |
31931c93 | 3360 | #ifdef __ARCH_SI_TRAPNO |
601d5abf | 3361 | to->si_trapno = from->si_trapno; |
212a36a1 | 3362 | #endif |
601d5abf EB |
3363 | to->si_lower = compat_ptr(from->si_lower); |
3364 | to->si_upper = compat_ptr(from->si_upper); | |
31931c93 EB |
3365 | break; |
3366 | case SIL_FAULT_PKUERR: | |
601d5abf | 3367 | to->si_addr = compat_ptr(from->si_addr); |
31931c93 | 3368 | #ifdef __ARCH_SI_TRAPNO |
601d5abf | 3369 | to->si_trapno = from->si_trapno; |
212a36a1 | 3370 | #endif |
601d5abf | 3371 | to->si_pkey = from->si_pkey; |
212a36a1 EB |
3372 | break; |
3373 | case SIL_CHLD: | |
601d5abf EB |
3374 | to->si_pid = from->si_pid; |
3375 | to->si_uid = from->si_uid; | |
3376 | to->si_status = from->si_status; | |
212a36a1 EB |
3377 | #ifdef CONFIG_X86_X32_ABI |
3378 | if (in_x32_syscall()) { | |
601d5abf EB |
3379 | to->si_utime = from->_sifields._sigchld_x32._utime; |
3380 | to->si_stime = from->_sifields._sigchld_x32._stime; | |
212a36a1 EB |
3381 | } else |
3382 | #endif | |
3383 | { | |
601d5abf EB |
3384 | to->si_utime = from->si_utime; |
3385 | to->si_stime = from->si_stime; | |
212a36a1 EB |
3386 | } |
3387 | break; | |
3388 | case SIL_RT: | |
601d5abf EB |
3389 | to->si_pid = from->si_pid; |
3390 | to->si_uid = from->si_uid; | |
3391 | to->si_int = from->si_int; | |
212a36a1 EB |
3392 | break; |
3393 | case SIL_SYS: | |
601d5abf EB |
3394 | to->si_call_addr = compat_ptr(from->si_call_addr); |
3395 | to->si_syscall = from->si_syscall; | |
3396 | to->si_arch = from->si_arch; | |
212a36a1 EB |
3397 | break; |
3398 | } | |
3399 | return 0; | |
3400 | } | |
601d5abf EB |
3401 | |
3402 | static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, | |
3403 | const struct compat_siginfo __user *ufrom) | |
3404 | { | |
3405 | struct compat_siginfo from; | |
3406 | ||
3407 | if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) | |
3408 | return -EFAULT; | |
3409 | ||
3410 | from.si_signo = signo; | |
3411 | return post_copy_siginfo_from_user32(to, &from); | |
3412 | } | |
3413 | ||
3414 | int copy_siginfo_from_user32(struct kernel_siginfo *to, | |
3415 | const struct compat_siginfo __user *ufrom) | |
3416 | { | |
3417 | struct compat_siginfo from; | |
3418 | ||
3419 | if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) | |
3420 | return -EFAULT; | |
3421 | ||
3422 | return post_copy_siginfo_from_user32(to, &from); | |
3423 | } | |
212a36a1 EB |
3424 | #endif /* CONFIG_COMPAT */ |
3425 | ||
943df148 ON |
3426 | /** |
3427 | * do_sigtimedwait - wait for queued signals specified in @which | |
3428 | * @which: queued signals to wait for | |
3429 | * @info: if non-null, the signal's siginfo is returned here | |
3430 | * @ts: upper bound on process time suspension | |
3431 | */ | |
ae7795bc | 3432 | static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, |
49c39f84 | 3433 | const struct timespec64 *ts) |
943df148 | 3434 | { |
2456e855 | 3435 | ktime_t *to = NULL, timeout = KTIME_MAX; |
943df148 | 3436 | struct task_struct *tsk = current; |
943df148 | 3437 | sigset_t mask = *which; |
2b1ecc3d | 3438 | int sig, ret = 0; |
943df148 ON |
3439 | |
3440 | if (ts) { | |
49c39f84 | 3441 | if (!timespec64_valid(ts)) |
943df148 | 3442 | return -EINVAL; |
49c39f84 | 3443 | timeout = timespec64_to_ktime(*ts); |
2b1ecc3d | 3444 | to = &timeout; |
943df148 ON |
3445 | } |
3446 | ||
3447 | /* | |
3448 | * Invert the set of allowed signals to get those we want to block. | |
3449 | */ | |
3450 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
3451 | signotset(&mask); | |
3452 | ||
3453 | spin_lock_irq(&tsk->sighand->siglock); | |
3454 | sig = dequeue_signal(tsk, &mask, info); | |
2456e855 | 3455 | if (!sig && timeout) { |
943df148 ON |
3456 | /* |
3457 | * None ready, temporarily unblock those we're interested in
3458 | * while we are sleeping, so that we'll be awakened when
b182801a ON |
3459 | * they arrive. Unblocking is always fine, we can avoid |
3460 | * set_current_blocked(). | |
943df148 ON |
3461 | */ |
3462 | tsk->real_blocked = tsk->blocked; | |
3463 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | |
3464 | recalc_sigpending(); | |
3465 | spin_unlock_irq(&tsk->sighand->siglock); | |
3466 | ||
2b1ecc3d TG |
3467 | __set_current_state(TASK_INTERRUPTIBLE); |
3468 | ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, | |
3469 | HRTIMER_MODE_REL); | |
943df148 | 3470 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 3471 | __set_task_blocked(tsk, &tsk->real_blocked); |
6114041a | 3472 | sigemptyset(&tsk->real_blocked); |
b182801a | 3473 | sig = dequeue_signal(tsk, &mask, info); |
943df148 ON |
3474 | } |
3475 | spin_unlock_irq(&tsk->sighand->siglock); | |
3476 | ||
3477 | if (sig) | |
3478 | return sig; | |
2b1ecc3d | 3479 | return ret ? -EINTR : -EAGAIN; |
943df148 ON |
3480 | } |
3481 | ||
41c57892 RD |
3482 | /** |
3483 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | |
3484 | * in @uthese | |
3485 | * @uthese: queued signals to wait for | |
3486 | * @uinfo: if non-null, the signal's siginfo is returned here | |
3487 | * @uts: upper bound on process time suspension | |
3488 | * @sigsetsize: size of sigset_t type | |
3489 | */ | |
17da2bd9 | 3490 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
49c39f84 AB |
3491 | siginfo_t __user *, uinfo, |
3492 | const struct __kernel_timespec __user *, uts, | |
17da2bd9 | 3493 | size_t, sigsetsize) |
1da177e4 | 3494 | { |
1da177e4 | 3495 | sigset_t these; |
49c39f84 | 3496 | struct timespec64 ts; |
ae7795bc | 3497 | kernel_siginfo_t info; |
943df148 | 3498 | int ret; |
1da177e4 LT |
3499 | |
3500 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3501 | if (sigsetsize != sizeof(sigset_t)) | |
3502 | return -EINVAL; | |
3503 | ||
3504 | if (copy_from_user(&these, uthese, sizeof(these))) | |
3505 | return -EFAULT; | |
5aba085e | 3506 | |
1da177e4 | 3507 | if (uts) { |
49c39f84 | 3508 | if (get_timespec64(&ts, uts)) |
1da177e4 | 3509 | return -EFAULT; |
1da177e4 LT |
3510 | } |
3511 | ||
943df148 | 3512 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
1da177e4 | 3513 | |
943df148 ON |
3514 | if (ret > 0 && uinfo) { |
3515 | if (copy_siginfo_to_user(uinfo, &info)) | |
3516 | ret = -EFAULT; | |
1da177e4 LT |
3517 | } |
3518 | ||
3519 | return ret; | |
3520 | } | |
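/*
 * Userspace usage sketch (illustration only): sigtimedwait() maps onto
 * rt_sigtimedwait above.  The waited-for signals must be blocked first,
 * otherwise they may be delivered to a handler instead of being dequeued.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &timeout);
	if (sig < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR otherwise */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}
#endif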
3521 | ||
df8522a3 AB |
3522 | #ifdef CONFIG_COMPAT_32BIT_TIME |
3523 | SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, | |
3524 | siginfo_t __user *, uinfo, | |
3525 | const struct old_timespec32 __user *, uts, | |
3526 | size_t, sigsetsize) | |
3527 | { | |
3528 | sigset_t these; | |
3529 | struct timespec64 ts; | |
3530 | kernel_siginfo_t info; | |
3531 | int ret; | |
3532 | ||
3533 | if (sigsetsize != sizeof(sigset_t)) | |
3534 | return -EINVAL; | |
3535 | ||
3536 | if (copy_from_user(&these, uthese, sizeof(these))) | |
3537 | return -EFAULT; | |
3538 | ||
3539 | if (uts) { | |
3540 | if (get_old_timespec32(&ts, uts)) | |
3541 | return -EFAULT; | |
3542 | } | |
3543 | ||
3544 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); | |
3545 | ||
3546 | if (ret > 0 && uinfo) { | |
3547 | if (copy_siginfo_to_user(uinfo, &info)) | |
3548 | ret = -EFAULT; | |
3549 | } | |
3550 | ||
3551 | return ret; | |
3552 | } | |
3553 | #endif | |
3554 | ||
1b3c872c | 3555 | #ifdef CONFIG_COMPAT |
2367c4b5 AB |
3556 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, |
3557 | struct compat_siginfo __user *, uinfo, | |
3558 | struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) | |
3559 | { | |
3560 | sigset_t s; | |
3561 | struct timespec64 t; | |
3562 | kernel_siginfo_t info; | |
3563 | long ret; | |
3564 | ||
3565 | if (sigsetsize != sizeof(sigset_t)) | |
3566 | return -EINVAL; | |
3567 | ||
3568 | if (get_compat_sigset(&s, uthese)) | |
3569 | return -EFAULT; | |
3570 | ||
3571 | if (uts) { | |
3572 | if (get_timespec64(&t, uts)) | |
3573 | return -EFAULT; | |
3574 | } | |
3575 | ||
3576 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
3577 | ||
3578 | if (ret > 0 && uinfo) { | |
3579 | if (copy_siginfo_to_user32(uinfo, &info)) | |
3580 | ret = -EFAULT; | |
3581 | } | |
3582 | ||
3583 | return ret; | |
3584 | } | |
3585 | ||
3586 | #ifdef CONFIG_COMPAT_32BIT_TIME | |
8dabe724 | 3587 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, |
1b3c872c | 3588 | struct compat_siginfo __user *, uinfo, |
9afc5eee | 3589 | struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) |
1b3c872c | 3590 | { |
1b3c872c | 3591 | sigset_t s; |
49c39f84 | 3592 | struct timespec64 t; |
ae7795bc | 3593 | kernel_siginfo_t info; |
1b3c872c AV |
3594 | long ret; |
3595 | ||
3596 | if (sigsetsize != sizeof(sigset_t)) | |
3597 | return -EINVAL; | |
3598 | ||
3968cf62 | 3599 | if (get_compat_sigset(&s, uthese)) |
1b3c872c | 3600 | return -EFAULT; |
1b3c872c AV |
3601 | |
3602 | if (uts) { | |
49c39f84 | 3603 | if (get_old_timespec32(&t, uts)) |
1b3c872c AV |
3604 | return -EFAULT; |
3605 | } | |
3606 | ||
3607 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
3608 | ||
3609 | if (ret > 0 && uinfo) { | |
3610 | if (copy_siginfo_to_user32(uinfo, &info)) | |
3611 | ret = -EFAULT; | |
3612 | } | |
3613 | ||
3614 | return ret; | |
3615 | } | |
3616 | #endif | |
2367c4b5 | 3617 | #endif |
1b3c872c | 3618 | |
3eb39f47 CB |
3619 | static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) |
3620 | { | |
3621 | clear_siginfo(info); | |
3622 | info->si_signo = sig; | |
3623 | info->si_errno = 0; | |
3624 | info->si_code = SI_USER; | |
3625 | info->si_pid = task_tgid_vnr(current); | |
3626 | info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); | |
3627 | } | |
3628 | ||
41c57892 RD |
3629 | /** |
3630 | * sys_kill - send a signal to a process | |
3631 | * @pid: the PID of the process | |
3632 | * @sig: signal to be sent | |
3633 | */ | |
17da2bd9 | 3634 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
1da177e4 | 3635 | { |
ae7795bc | 3636 | struct kernel_siginfo info; |
1da177e4 | 3637 | |
3eb39f47 | 3638 | prepare_kill_siginfo(sig, &info); |
1da177e4 LT |
3639 | |
3640 | return kill_something_info(sig, &info, pid); | |
3641 | } | |
3642 | ||
3eb39f47 CB |
3643 | /* |
3644 | * Verify that the signaler and signalee either are in the same pid namespace | |
3645 | * or that the signaler's pid namespace is an ancestor of the signalee's pid | |
3646 | * namespace. | |
3647 | */ | |
3648 | static bool access_pidfd_pidns(struct pid *pid) | |
3649 | { | |
3650 | struct pid_namespace *active = task_active_pid_ns(current); | |
3651 | struct pid_namespace *p = ns_of_pid(pid); | |
3652 | ||
3653 | for (;;) { | |
3654 | if (!p) | |
3655 | return false; | |
3656 | if (p == active) | |
3657 | break; | |
3658 | p = p->parent; | |
3659 | } | |
3660 | ||
3661 | return true; | |
3662 | } | |
3663 | ||
3664 | static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info) | |
3665 | { | |
3666 | #ifdef CONFIG_COMPAT | |
3667 | /* | |
3668 | * Avoid hooking up compat syscalls and instead handle necessary | |
3669 | * conversions here. Note, this is a stop-gap measure and should not be | |
3670 | * considered a generic solution. | |
3671 | */ | |
3672 | if (in_compat_syscall()) | |
3673 | return copy_siginfo_from_user32( | |
3674 | kinfo, (struct compat_siginfo __user *)info); | |
3675 | #endif | |
3676 | return copy_siginfo_from_user(kinfo, info); | |
3677 | } | |
3678 | ||
2151ad1b CB |
3679 | static struct pid *pidfd_to_pid(const struct file *file) |
3680 | { | |
3695eae5 CB |
3681 | struct pid *pid; |
3682 | ||
3683 | pid = pidfd_pid(file); | |
3684 | if (!IS_ERR(pid)) | |
3685 | return pid; | |
2151ad1b CB |
3686 | |
3687 | return tgid_pidfd_to_pid(file); | |
3688 | } | |
3689 | ||
3eb39f47 | 3690 | /** |
c732327f CB |
3691 | * sys_pidfd_send_signal - Signal a process through a pidfd |
3692 | * @pidfd: file descriptor of the process | |
3693 | * @sig: signal to send | |
3694 | * @info: signal info | |
3695 | * @flags: future flags | |
3eb39f47 CB |
3696 | * |
3697 | * The syscall currently only signals via PIDTYPE_PID which covers | |
3698 | * kill(<positive-pid>, <signal>). It does not signal threads or process
3699 | * groups. | |
3700 | * In order to extend the syscall to threads and process groups the @flags | |
3701 | * argument should be used. In essence, the @flags argument will determine | |
3702 | * what is signaled and not the file descriptor itself. In other words,
3703 | * grouping is a property of the flags argument not a property of the file | |
3704 | * descriptor. | |
3705 | * | |
3706 | * Return: 0 on success, negative errno on failure | |
3707 | */ | |
3708 | SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, | |
3709 | siginfo_t __user *, info, unsigned int, flags) | |
3710 | { | |
3711 | int ret; | |
3712 | struct fd f; | |
3713 | struct pid *pid; | |
3714 | kernel_siginfo_t kinfo; | |
3715 | ||
3716 | /* Enforce flags be set to 0 until we add an extension. */ | |
3717 | if (flags) | |
3718 | return -EINVAL; | |
3719 | ||
738a7832 | 3720 | f = fdget(pidfd); |
3eb39f47 CB |
3721 | if (!f.file) |
3722 | return -EBADF; | |
3723 | ||
3724 | /* Is this a pidfd? */ | |
2151ad1b | 3725 | pid = pidfd_to_pid(f.file); |
3eb39f47 CB |
3726 | if (IS_ERR(pid)) { |
3727 | ret = PTR_ERR(pid); | |
3728 | goto err; | |
3729 | } | |
3730 | ||
3731 | ret = -EINVAL; | |
3732 | if (!access_pidfd_pidns(pid)) | |
3733 | goto err; | |
3734 | ||
3735 | if (info) { | |
3736 | ret = copy_siginfo_from_user_any(&kinfo, info); | |
3737 | if (unlikely(ret)) | |
3738 | goto err; | |
3739 | ||
3740 | ret = -EINVAL; | |
3741 | if (unlikely(sig != kinfo.si_signo)) | |
3742 | goto err; | |
3743 | ||
556a888a JH |
3744 | /* Only allow sending arbitrary signals to yourself. */ |
3745 | ret = -EPERM; | |
3eb39f47 | 3746 | if ((task_pid(current) != pid) && |
556a888a JH |
3747 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) |
3748 | goto err; | |
3eb39f47 CB |
3749 | } else { |
3750 | prepare_kill_siginfo(sig, &kinfo); | |
3751 | } | |
3752 | ||
3753 | ret = kill_pid_info(sig, &kinfo, pid); | |
3754 | ||
3755 | err: | |
3756 | fdput(f); | |
3757 | return ret; | |
3758 | } | |
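/*
 * Userspace usage sketch (illustration only): signalling a process through
 * a pidfd.  Assumes headers that define SYS_pidfd_open and
 * SYS_pidfd_send_signal; glibc may not provide wrappers, hence syscall().
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid;
	int pidfd;

	if (argc != 2)
		return 1;
	pid = (pid_t)atoi(argv[1]);

	pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* A NULL siginfo asks the kernel to fill in SI_USER, as kill(2) does. */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
		perror("pidfd_send_signal");

	close(pidfd);
	return 0;
}
#endif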
3eb39f47 | 3759 | |
30b4ae8a | 3760 | static int |
ae7795bc | 3761 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) |
1da177e4 | 3762 | { |
1da177e4 | 3763 | struct task_struct *p; |
30b4ae8a | 3764 | int error = -ESRCH; |
1da177e4 | 3765 | |
3547ff3a | 3766 | rcu_read_lock(); |
228ebcbe | 3767 | p = find_task_by_vpid(pid); |
b488893a | 3768 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
30b4ae8a | 3769 | error = check_kill_permission(sig, info, p); |
1da177e4 LT |
3770 | /* |
3771 | * The null signal is a permissions and process existence | |
3772 | * probe. No signal is actually delivered. | |
3773 | */ | |
4a30debf | 3774 | if (!error && sig) { |
40b3b025 | 3775 | error = do_send_sig_info(sig, info, p, PIDTYPE_PID); |
4a30debf ON |
3776 | /* |
3777 | * If lock_task_sighand() failed we pretend the task | |
3778 | * dies after receiving the signal. The window is tiny, | |
3779 | * and the signal is private anyway. | |
3780 | */ | |
3781 | if (unlikely(error == -ESRCH)) | |
3782 | error = 0; | |
1da177e4 LT |
3783 | } |
3784 | } | |
3547ff3a | 3785 | rcu_read_unlock(); |
6dd69f10 | 3786 | |
1da177e4 LT |
3787 | return error; |
3788 | } | |
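/*
 * Userspace usage sketch (illustration only): the "null signal" probe
 * described in the comment above.  Signal 0 performs the permission and
 * existence checks without delivering anything.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>

/* Returns 1 if the process exists (even if we may not signal it). */
static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;	/* EPERM: exists, but no permission */
}

int main(void)
{
	printf("pid 1 exists: %d\n", process_exists(1));
	return 0;
}
#endif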
3789 | ||
30b4ae8a TG |
3790 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
3791 | { | |
ae7795bc | 3792 | struct kernel_siginfo info; |
30b4ae8a | 3793 | |
5f74972c | 3794 | clear_siginfo(&info); |
30b4ae8a TG |
3795 | info.si_signo = sig; |
3796 | info.si_errno = 0; | |
3797 | info.si_code = SI_TKILL; | |
3798 | info.si_pid = task_tgid_vnr(current); | |
078de5f7 | 3799 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
30b4ae8a TG |
3800 | |
3801 | return do_send_specific(tgid, pid, sig, &info); | |
3802 | } | |
3803 | ||
6dd69f10 VL |
3804 | /** |
3805 | * sys_tgkill - send signal to one specific thread | |
3806 | * @tgid: the thread group ID of the thread | |
3807 | * @pid: the PID of the thread | |
3808 | * @sig: signal to be sent | |
3809 | * | |
72fd4a35 | 3810 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
6dd69f10 VL |
3811 | * exists but no longer belongs to the target process. This
3812 | * method solves the problem of threads exiting and PIDs getting reused.
3813 | */ | |
a5f8fa9e | 3814 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
6dd69f10 VL |
3815 | { |
3816 | /* This is only valid for single tasks */ | |
3817 | if (pid <= 0 || tgid <= 0) | |
3818 | return -EINVAL; | |
3819 | ||
3820 | return do_tkill(tgid, pid, sig); | |
3821 | } | |
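/*
 * Userspace usage sketch (illustration only): directing a signal at one
 * specific thread with tgkill.  Assumes headers defining SYS_tgkill and
 * SYS_gettid; the signal is ignored first so the process survives.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);	/* main thread: tid == tgid */

	signal(SIGUSR1, SIG_IGN);

	/* Fails with ESRCH if @tid no longer belongs to @tgid. */
	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0)
		perror("tgkill");
	return 0;
}
#endif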
3822 | ||
41c57892 RD |
3823 | /** |
3824 | * sys_tkill - send signal to one specific task | |
3825 | * @pid: the PID of the task | |
3826 | * @sig: signal to be sent | |
3827 | * | |
1da177e4 LT |
3828 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
3829 | */ | |
a5f8fa9e | 3830 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
1da177e4 | 3831 | { |
1da177e4 LT |
3832 | /* This is only valid for single tasks */ |
3833 | if (pid <= 0) | |
3834 | return -EINVAL; | |
3835 | ||
6dd69f10 | 3836 | return do_tkill(0, pid, sig); |
1da177e4 LT |
3837 | } |
3838 | ||
ae7795bc | 3839 | static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) |
75907d4d AV |
3840 | { |
3841 | /* Not even root can pretend to send signals from the kernel. | |
3842 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | |
3843 | */ | |
66dd34ad | 3844 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
69828dce | 3845 | (task_pid_vnr(current) != pid)) |
75907d4d | 3846 | return -EPERM; |
69828dce | 3847 | |
75907d4d AV |
3848 | /* POSIX.1b doesn't mention process groups. */ |
3849 | return kill_proc_info(sig, info, pid); | |
3850 | } | |
3851 | ||
41c57892 RD |
3852 | /** |
3853 | * sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
3854 | * @pid: the PID of the thread | |
3855 | * @sig: signal to be sent | |
3856 | * @uinfo: signal info to be sent | |
3857 | */ | |
a5f8fa9e HC |
3858 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
3859 | siginfo_t __user *, uinfo) | |
1da177e4 | 3860 | { |
ae7795bc | 3861 | kernel_siginfo_t info; |
601d5abf | 3862 | int ret = __copy_siginfo_from_user(sig, &info, uinfo); |
4cd2e0e7 EB |
3863 | if (unlikely(ret)) |
3864 | return ret; | |
75907d4d AV |
3865 | return do_rt_sigqueueinfo(pid, sig, &info); |
3866 | } | |
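/*
 * Userspace usage sketch (illustration only): sigqueue(3) queues a signal
 * with an accompanying value; glibc implements it on top of
 * rt_sigqueueinfo above with si_code set to SI_QUEUE.  printf() in a
 * handler is not async-signal-safe and is used here only for brevity.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)ctx;
	printf("sig %d, value %d, si_code %d\n",
	       sig, info->si_value.sival_int, info->si_code);
}

int main(void)
{
	struct sigaction sa = { 0 };
	union sigval v = { .sival_int = 42 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	sigqueue(getpid(), SIGUSR1, v);	/* handler runs before this returns */
	return 0;
}
#endif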
1da177e4 | 3867 | |
75907d4d | 3868 | #ifdef CONFIG_COMPAT |
75907d4d AV |
3869 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
3870 | compat_pid_t, pid, | |
3871 | int, sig, | |
3872 | struct compat_siginfo __user *, uinfo) | |
3873 | { | |
ae7795bc | 3874 | kernel_siginfo_t info; |
601d5abf | 3875 | int ret = __copy_siginfo_from_user32(sig, &info, uinfo); |
75907d4d AV |
3876 | if (unlikely(ret)) |
3877 | return ret; | |
3878 | return do_rt_sigqueueinfo(pid, sig, &info); | |
1da177e4 | 3879 | } |
75907d4d | 3880 | #endif |
1da177e4 | 3881 | |
ae7795bc | 3882 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) |
62ab4505 TG |
3883 | { |
3884 | /* This is only valid for single tasks */ | |
3885 | if (pid <= 0 || tgid <= 0) | |
3886 | return -EINVAL; | |
3887 | ||
3888 | /* Not even root can pretend to send signals from the kernel. | |
da48524e JT |
3889 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3890 | */ | |
69828dce VD |
3891 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3892 | (task_pid_vnr(current) != pid)) | |
62ab4505 | 3893 | return -EPERM; |
69828dce | 3894 | |
62ab4505 TG |
3895 | return do_send_specific(tgid, pid, sig, info); |
3896 | } | |
3897 | ||
3898 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | |
3899 | siginfo_t __user *, uinfo) | |
3900 | { | |
ae7795bc | 3901 | kernel_siginfo_t info; |
601d5abf | 3902 | int ret = __copy_siginfo_from_user(sig, &info, uinfo); |
4cd2e0e7 EB |
3903 | if (unlikely(ret)) |
3904 | return ret; | |
62ab4505 TG |
3905 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3906 | } | |
3907 | ||
9aae8fc0 AV |
3908 | #ifdef CONFIG_COMPAT |
3909 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, | |
3910 | compat_pid_t, tgid, | |
3911 | compat_pid_t, pid, | |
3912 | int, sig, | |
3913 | struct compat_siginfo __user *, uinfo) | |
3914 | { | |
ae7795bc | 3915 | kernel_siginfo_t info; |
601d5abf | 3916 | int ret = __copy_siginfo_from_user32(sig, &info, uinfo); |
4cd2e0e7 EB |
3917 | if (unlikely(ret)) |
3918 | return ret; | |
9aae8fc0 AV |
3919 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3920 | } | |
3921 | #endif | |
3922 | ||
0341729b | 3923 | /* |
b4e74264 | 3924 | * For kthreads only; must not be used if cloned with CLONE_SIGHAND
0341729b | 3925 | */ |
b4e74264 | 3926 | void kernel_sigaction(int sig, __sighandler_t action) |
0341729b | 3927 | { |
ec5955b8 | 3928 | spin_lock_irq(¤t->sighand->siglock); |
b4e74264 ON |
3929 | current->sighand->action[sig - 1].sa.sa_handler = action; |
3930 | if (action == SIG_IGN) { | |
3931 | sigset_t mask; | |
0341729b | 3932 | |
b4e74264 ON |
3933 | sigemptyset(&mask); |
3934 | sigaddset(&mask, sig); | |
580d34e4 | 3935 | |
b4e74264 ON |
3936 | flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); |
3937 | flush_sigqueue_mask(&mask, ¤t->pending); | |
3938 | recalc_sigpending(); | |
3939 | } | |
0341729b ON |
3940 | spin_unlock_irq(¤t->sighand->siglock); |
3941 | } | |
b4e74264 | 3942 | EXPORT_SYMBOL(kernel_sigaction); |
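/*
 * Illustrative kthread sketch, not part of this file: kernel_sigaction()
 * is the low-level helper that the allow_signal()/disallow_signal()
 * wrappers are built on.  The thread function below is hypothetical; it
 * ignores (and thereby flushes) SIGUSR1 while it runs.
 */
#if 0
static int example_kthread_fn(void *unused)
{
	kernel_sigaction(SIGUSR1, SIG_IGN);	/* ignore and flush SIGUSR1 */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	kernel_sigaction(SIGUSR1, SIG_DFL);	/* back to the default action */
	return 0;
}
#endif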
0341729b | 3943 | |
68463510 DS |
3944 | void __weak sigaction_compat_abi(struct k_sigaction *act, |
3945 | struct k_sigaction *oact) | |
3946 | { | |
3947 | } | |
3948 | ||
88531f72 | 3949 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4 | 3950 | { |
afe2b038 | 3951 | struct task_struct *p = current, *t; |
1da177e4 | 3952 | struct k_sigaction *k; |
71fabd5e | 3953 | sigset_t mask; |
1da177e4 | 3954 | |
7ed20e1a | 3955 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4 LT |
3956 | return -EINVAL; |
3957 | ||
afe2b038 | 3958 | k = &p->sighand->action[sig-1]; |
1da177e4 | 3959 | |
afe2b038 | 3960 | spin_lock_irq(&p->sighand->siglock); |
1da177e4 LT |
3961 | if (oact) |
3962 | *oact = *k; | |
3963 | ||
68463510 DS |
3964 | sigaction_compat_abi(act, oact); |
3965 | ||
1da177e4 | 3966 | if (act) { |
9ac95f2f ON |
3967 | sigdelsetmask(&act->sa.sa_mask, |
3968 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
88531f72 | 3969 | *k = *act; |
1da177e4 LT |
3970 | /* |
3971 | * POSIX 3.3.1.3: | |
3972 | * "Setting a signal action to SIG_IGN for a signal that is | |
3973 | * pending shall cause the pending signal to be discarded, | |
3974 | * whether or not it is blocked." | |
3975 | * | |
3976 | * "Setting a signal action to SIG_DFL for a signal that is | |
3977 | * pending and whose default action is to ignore the signal | |
3978 | * (for example, SIGCHLD), shall cause the pending signal to | |
3979 | * be discarded, whether or not it is blocked" | |
3980 | */ | |
afe2b038 | 3981 | if (sig_handler_ignored(sig_handler(p, sig), sig)) { |
71fabd5e GA |
3982 | sigemptyset(&mask); |
3983 | sigaddset(&mask, sig); | |
afe2b038 ON |
3984 | flush_sigqueue_mask(&mask, &p->signal->shared_pending); |
3985 | for_each_thread(p, t) | |
c09c1441 | 3986 | flush_sigqueue_mask(&mask, &t->pending); |
1da177e4 | 3987 | } |
1da177e4 LT |
3988 | } |
3989 | ||
afe2b038 | 3990 | spin_unlock_irq(&p->sighand->siglock); |
1da177e4 LT |
3991 | return 0; |
3992 | } | |
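/*
 * Userspace demonstration sketch (illustration only) of the POSIX rule
 * quoted above: switching a pending, blocked signal to SIG_IGN discards
 * the pending instance.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static int usr1_pending(void)
{
	sigset_t p;

	sigpending(&p);
	return sigismember(&p, SIGUSR1);
}

int main(void)
{
	sigset_t block;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);
	printf("pending before SIG_IGN: %d\n", usr1_pending());	/* 1 */

	signal(SIGUSR1, SIG_IGN);	/* pending instance is discarded */
	printf("pending after SIG_IGN:  %d\n", usr1_pending());	/* 0 */

	signal(SIGUSR1, SIG_DFL);
	sigprocmask(SIG_UNBLOCK, &block, NULL);	/* nothing is delivered */
	return 0;
}
#endif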
3993 | ||
c09c1441 | 3994 | static int |
22839869 WD |
3995 | do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, |
3996 | size_t min_ss_size) | |
1da177e4 | 3997 | { |
bcfe8ad8 | 3998 | struct task_struct *t = current; |
1da177e4 | 3999 | |
bcfe8ad8 AV |
4000 | if (oss) { |
4001 | memset(oss, 0, sizeof(stack_t)); | |
4002 | oss->ss_sp = (void __user *) t->sas_ss_sp; | |
4003 | oss->ss_size = t->sas_ss_size; | |
4004 | oss->ss_flags = sas_ss_flags(sp) | | |
4005 | (current->sas_ss_flags & SS_FLAG_BITS); | |
4006 | } | |
1da177e4 | 4007 | |
bcfe8ad8 AV |
4008 | if (ss) { |
4009 | void __user *ss_sp = ss->ss_sp; | |
4010 | size_t ss_size = ss->ss_size; | |
4011 | unsigned ss_flags = ss->ss_flags; | |
407bc16a | 4012 | int ss_mode; |
1da177e4 | 4013 | |
bcfe8ad8 AV |
4014 | if (unlikely(on_sig_stack(sp))) |
4015 | return -EPERM; | |
1da177e4 | 4016 | |
407bc16a | 4017 | ss_mode = ss_flags & ~SS_FLAG_BITS; |
bcfe8ad8 AV |
4018 | if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && |
4019 | ss_mode != 0)) | |
4020 | return -EINVAL; | |
1da177e4 | 4021 | |
407bc16a | 4022 | if (ss_mode == SS_DISABLE) { |
1da177e4 LT |
4023 | ss_size = 0; |
4024 | ss_sp = NULL; | |
4025 | } else { | |
22839869 | 4026 | if (unlikely(ss_size < min_ss_size)) |
bcfe8ad8 | 4027 | return -ENOMEM; |
1da177e4 LT |
4028 | } |
4029 | ||
bcfe8ad8 AV |
4030 | t->sas_ss_sp = (unsigned long) ss_sp; |
4031 | t->sas_ss_size = ss_size; | |
4032 | t->sas_ss_flags = ss_flags; | |
1da177e4 | 4033 | } |
bcfe8ad8 | 4034 | return 0; |
1da177e4 | 4035 | } |
bcfe8ad8 | 4036 | |
6bf9adfc AV |
4037 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
4038 | { | |
bcfe8ad8 AV |
4039 | stack_t new, old; |
4040 | int err; | |
4041 | if (uss && copy_from_user(&new, uss, sizeof(stack_t))) | |
4042 | return -EFAULT; | |
4043 | err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, | |
22839869 WD |
4044 | current_user_stack_pointer(), |
4045 | MINSIGSTKSZ); | |
bcfe8ad8 AV |
4046 | if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) |
4047 | err = -EFAULT; | |
4048 | return err; | |
6bf9adfc | 4049 | } |
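/*
 * Userspace usage sketch (illustration only): installing an alternate
 * signal stack and delivering SIGSEGV on it via SA_ONSTACK, which is what
 * makes stack-overflow handling possible.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Runs on the alternate stack; only async-signal-safe calls here. */
	write(2, "SIGSEGV on altstack\n", 20);
	_exit(1);
}

int main(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = { 0 };

	sigaltstack(&ss, NULL);		/* handled by do_sigaltstack() above */

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the alt stack */
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);
	return 0;
}
#endif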
1da177e4 | 4050 | |
5c49574f AV |
4051 | int restore_altstack(const stack_t __user *uss) |
4052 | { | |
bcfe8ad8 AV |
4053 | stack_t new; |
4054 | if (copy_from_user(&new, uss, sizeof(stack_t))) | |
4055 | return -EFAULT; | |
22839869 WD |
4056 | (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), |
4057 | MINSIGSTKSZ); | |
5c49574f | 4058 | /* squash all but EFAULT for now */ |
bcfe8ad8 | 4059 | return 0; |
5c49574f AV |
4060 | } |
4061 | ||
c40702c4 AV |
4062 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
4063 | { | |
4064 | struct task_struct *t = current; | |
2a742138 SS |
4065 | int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
4066 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 4067 | __put_user(t->sas_ss_size, &uss->ss_size); |
2a742138 SS |
4068 | if (err) |
4069 | return err; | |
4070 | if (t->sas_ss_flags & SS_AUTODISARM) | |
4071 | sas_ss_reset(t); | |
4072 | return 0; | |
c40702c4 AV |
4073 | } |
4074 | ||
90268439 | 4075 | #ifdef CONFIG_COMPAT |
6203deb0 DB |
4076 | static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, |
4077 | compat_stack_t __user *uoss_ptr) | |
90268439 AV |
4078 | { |
4079 | stack_t uss, uoss; | |
4080 | int ret; | |
90268439 AV |
4081 | |
4082 | if (uss_ptr) { | |
4083 | compat_stack_t uss32; | |
90268439 AV |
4084 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) |
4085 | return -EFAULT; | |
4086 | uss.ss_sp = compat_ptr(uss32.ss_sp); | |
4087 | uss.ss_flags = uss32.ss_flags; | |
4088 | uss.ss_size = uss32.ss_size; | |
4089 | } | |
bcfe8ad8 | 4090 | ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, |
22839869 WD |
4091 | compat_user_stack_pointer(), |
4092 | COMPAT_MINSIGSTKSZ); | |
90268439 | 4093 | if (ret >= 0 && uoss_ptr) { |
bcfe8ad8 AV |
4094 | compat_stack_t old; |
4095 | memset(&old, 0, sizeof(old)); | |
4096 | old.ss_sp = ptr_to_compat(uoss.ss_sp); | |
4097 | old.ss_flags = uoss.ss_flags; | |
4098 | old.ss_size = uoss.ss_size; | |
4099 | if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) | |
90268439 AV |
4100 | ret = -EFAULT; |
4101 | } | |
4102 | return ret; | |
4103 | } | |
4104 | ||
6203deb0 DB |
4105 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
4106 | const compat_stack_t __user *, uss_ptr, | |
4107 | compat_stack_t __user *, uoss_ptr) | |
4108 | { | |
4109 | return do_compat_sigaltstack(uss_ptr, uoss_ptr); | |
4110 | } | |
4111 | ||
90268439 AV |
4112 | int compat_restore_altstack(const compat_stack_t __user *uss) |
4113 | { | |
6203deb0 | 4114 | int err = do_compat_sigaltstack(uss, NULL); |
90268439 AV |
4115 | /* squash all but -EFAULT for now */ |
4116 | return err == -EFAULT ? err : 0; | |
4117 | } | |
c40702c4 AV |
4118 | |
4119 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) | |
4120 | { | |
441398d3 | 4121 | int err; |
c40702c4 | 4122 | struct task_struct *t = current; |
441398d3 SS |
4123 | err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), |
4124 | &uss->ss_sp) | | |
4125 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 4126 | __put_user(t->sas_ss_size, &uss->ss_size); |
441398d3 SS |
4127 | if (err) |
4128 | return err; | |
4129 | if (t->sas_ss_flags & SS_AUTODISARM) | |
4130 | sas_ss_reset(t); | |
4131 | return 0; | |
c40702c4 | 4132 | } |
90268439 | 4133 | #endif |
1da177e4 LT |
4134 | |
4135 | #ifdef __ARCH_WANT_SYS_SIGPENDING | |
4136 | ||
41c57892 RD |
4137 | /** |
4138 | * sys_sigpending - examine pending signals | |
d53238cd | 4139 | * @uset: where the mask of pending signals is returned
41c57892 | 4140 | */ |
d53238cd | 4141 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) |
1da177e4 | 4142 | { |
d53238cd | 4143 | sigset_t set; |
d53238cd DB |
4144 | |
4145 | if (sizeof(old_sigset_t) > sizeof(*uset)) | |
4146 | return -EINVAL; | |
4147 | ||
b1d294c8 CB |
4148 | do_sigpending(&set); |
4149 | ||
4150 | if (copy_to_user(uset, &set, sizeof(old_sigset_t))) | |
4151 | return -EFAULT; | |
4152 | ||
4153 | return 0; | |
1da177e4 LT |
4154 | } |
4155 | ||
8f13621a AV |
4156 | #ifdef CONFIG_COMPAT |
4157 | COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) | |
4158 | { | |
4159 | sigset_t set; | |
b1d294c8 CB |
4160 | |
4161 | do_sigpending(&set); | |
4162 | ||
4163 | return put_user(set.sig[0], set32); | |
8f13621a AV |
4164 | } |
4165 | #endif | |
4166 | ||
1da177e4 LT |
4167 | #endif |
4168 | ||
4169 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | |
41c57892 RD |
4170 | /** |
4171 | * sys_sigprocmask - examine and change blocked signals | |
4172 | * @how: whether to add, remove, or set signals | |
b013c399 | 4173 | * @nset: signals to add or remove (if non-null) |
41c57892 RD |
4174 | * @oset: previous value of signal mask if non-null |
4175 | * | |
5aba085e RD |
4176 | * Some platforms have their own version with special arguments; |
4177 | * others support only sys_rt_sigprocmask. | |
4178 | */ | |
1da177e4 | 4179 | |
b013c399 | 4180 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
b290ebe2 | 4181 | old_sigset_t __user *, oset) |
1da177e4 | 4182 | { |
1da177e4 | 4183 | old_sigset_t old_set, new_set; |
2e4f7c77 | 4184 | sigset_t new_blocked; |
1da177e4 | 4185 | |
b013c399 | 4186 | old_set = current->blocked.sig[0]; |
1da177e4 | 4187 | |
b013c399 ON |
4188 | if (nset) { |
4189 | if (copy_from_user(&new_set, nset, sizeof(*nset))) | |
4190 | return -EFAULT; | |
1da177e4 | 4191 | |
2e4f7c77 | 4192 | new_blocked = current->blocked; |
1da177e4 | 4193 | |
1da177e4 | 4194 | switch (how) { |
1da177e4 | 4195 | case SIG_BLOCK: |
2e4f7c77 | 4196 | sigaddsetmask(&new_blocked, new_set); |
1da177e4 LT |
4197 | break; |
4198 | case SIG_UNBLOCK: | |
2e4f7c77 | 4199 | sigdelsetmask(&new_blocked, new_set); |
1da177e4 LT |
4200 | break; |
4201 | case SIG_SETMASK: | |
2e4f7c77 | 4202 | new_blocked.sig[0] = new_set; |
1da177e4 | 4203 | break; |
2e4f7c77 ON |
4204 | default: |
4205 | return -EINVAL; | |
1da177e4 LT |
4206 | } |
4207 | ||
0c4a8423 | 4208 | set_current_blocked(&new_blocked); |
b013c399 ON |
4209 | } |
4210 | ||
4211 | if (oset) { | |
1da177e4 | 4212 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
b013c399 | 4213 | return -EFAULT; |
1da177e4 | 4214 | } |
b013c399 ON |
4215 | |
4216 | return 0; | |
1da177e4 LT |
4217 | } |
4218 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | |
4219 | ||
eaca6eae | 4220 | #ifndef CONFIG_ODD_RT_SIGACTION |
41c57892 RD |
4221 | /** |
4222 | * sys_rt_sigaction - alter an action taken by a process | |
4223 | * @sig: signal to be sent | |
f9fa0bc1 RD |
4224 | * @act: new sigaction |
4225 | * @oact: used to save the previous sigaction | |
41c57892 RD |
4226 | * @sigsetsize: size of sigset_t type |
4227 | */ | |
d4e82042 HC |
4228 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4229 | const struct sigaction __user *, act, | |
4230 | struct sigaction __user *, oact, | |
4231 | size_t, sigsetsize) | |
1da177e4 LT |
4232 | { |
4233 | struct k_sigaction new_sa, old_sa; | |
d8f993b3 | 4234 | int ret; |
1da177e4 LT |
4235 | |
4236 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4237 | if (sigsetsize != sizeof(sigset_t)) | |
d8f993b3 | 4238 | return -EINVAL; |
1da177e4 | 4239 | |
d8f993b3 CB |
4240 | if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
4241 | return -EFAULT; | |
1da177e4 LT |
4242 | |
4243 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | |
d8f993b3 CB |
4244 | if (ret) |
4245 | return ret; | |
1da177e4 | 4246 | |
d8f993b3 CB |
4247 | if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
4248 | return -EFAULT; | |
4249 | ||
4250 | return 0; | |
1da177e4 | 4251 | } |
08d32fe5 | 4252 | #ifdef CONFIG_COMPAT |
08d32fe5 AV |
4253 | COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4254 | const struct compat_sigaction __user *, act, | |
4255 | struct compat_sigaction __user *, oact, | |
4256 | compat_size_t, sigsetsize) | |
4257 | { | |
4258 | struct k_sigaction new_ka, old_ka; | |
08d32fe5 AV |
4259 | #ifdef __ARCH_HAS_SA_RESTORER |
4260 | compat_uptr_t restorer; | |
4261 | #endif | |
4262 | int ret; | |
4263 | ||
4264 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4265 | if (sigsetsize != sizeof(compat_sigset_t)) | |
4266 | return -EINVAL; | |
4267 | ||
4268 | if (act) { | |
4269 | compat_uptr_t handler; | |
4270 | ret = get_user(handler, &act->sa_handler); | |
4271 | new_ka.sa.sa_handler = compat_ptr(handler); | |
4272 | #ifdef __ARCH_HAS_SA_RESTORER | |
4273 | ret |= get_user(restorer, &act->sa_restorer); | |
4274 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
4275 | #endif | |
3968cf62 | 4276 | ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); |
3ddc5b46 | 4277 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); |
08d32fe5 AV |
4278 | if (ret) |
4279 | return -EFAULT; | |
08d32fe5 AV |
4280 | } |
4281 | ||
4282 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4283 | if (!ret && oact) { | |
08d32fe5 AV |
4284 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4285 | &oact->sa_handler); | |
f454322e DL |
4286 | ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, |
4287 | sizeof(oact->sa_mask)); | |
3ddc5b46 | 4288 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
08d32fe5 AV |
4289 | #ifdef __ARCH_HAS_SA_RESTORER |
4290 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
4291 | &oact->sa_restorer); | |
4292 | #endif | |
4293 | } | |
4294 | return ret; | |
4295 | } | |
4296 | #endif | |
eaca6eae | 4297 | #endif /* !CONFIG_ODD_RT_SIGACTION */ |
1da177e4 | 4298 | |
495dfbf7 AV |
4299 | #ifdef CONFIG_OLD_SIGACTION |
4300 | SYSCALL_DEFINE3(sigaction, int, sig, | |
4301 | const struct old_sigaction __user *, act, | |
4302 | struct old_sigaction __user *, oact) | |
4303 | { | |
4304 | struct k_sigaction new_ka, old_ka; | |
4305 | int ret; | |
4306 | ||
4307 | if (act) { | |
4308 | old_sigset_t mask; | |
96d4f267 | 4309 | if (!access_ok(act, sizeof(*act)) || |
495dfbf7 AV |
4310 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
4311 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | |
4312 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
4313 | __get_user(mask, &act->sa_mask)) | |
4314 | return -EFAULT; | |
4315 | #ifdef __ARCH_HAS_KA_RESTORER | |
4316 | new_ka.ka_restorer = NULL; | |
4317 | #endif | |
4318 | siginitset(&new_ka.sa.sa_mask, mask); | |
4319 | } | |
4320 | ||
4321 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4322 | ||
4323 | if (!ret && oact) { | |
96d4f267 | 4324 | if (!access_ok(oact, sizeof(*oact)) || |
495dfbf7 AV |
4325 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
4326 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | |
4327 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
4328 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
4329 | return -EFAULT; | |
4330 | } | |
4331 | ||
4332 | return ret; | |
4333 | } | |
4334 | #endif | |
4335 | #ifdef CONFIG_COMPAT_OLD_SIGACTION | |
4336 | COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, | |
4337 | const struct compat_old_sigaction __user *, act, | |
4338 | struct compat_old_sigaction __user *, oact) | |
4339 | { | |
4340 | struct k_sigaction new_ka, old_ka; | |
4341 | int ret; | |
4342 | compat_old_sigset_t mask; | |
4343 | compat_uptr_t handler, restorer; | |
4344 | ||
4345 | if (act) { | |
96d4f267 | 4346 | if (!access_ok(act, sizeof(*act)) || |
495dfbf7 AV |
4347 | __get_user(handler, &act->sa_handler) || |
4348 | __get_user(restorer, &act->sa_restorer) || | |
4349 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
4350 | __get_user(mask, &act->sa_mask)) | |
4351 | return -EFAULT; | |
4352 | ||
4353 | #ifdef __ARCH_HAS_KA_RESTORER | |
4354 | new_ka.ka_restorer = NULL; | |
4355 | #endif | |
4356 | new_ka.sa.sa_handler = compat_ptr(handler); | |
4357 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
4358 | siginitset(&new_ka.sa.sa_mask, mask); | |
4359 | } | |
4360 | ||
4361 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4362 | ||
4363 | if (!ret && oact) { | |
96d4f267 | 4364 | if (!access_ok(oact, sizeof(*oact)) || |
495dfbf7 AV |
4365 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4366 | &oact->sa_handler) || | |
4367 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
4368 | &oact->sa_restorer) || | |
4369 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
4370 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
4371 | return -EFAULT; | |
4372 | } | |
4373 | return ret; | |
4374 | } | |
4375 | #endif | |
1da177e4 | 4376 | |
f6187769 | 4377 | #ifdef CONFIG_SGETMASK_SYSCALL |
1da177e4 LT |
4378 | |
4379 | /* | |
4380 | * For backwards compatibility. Functionality superseded by sigprocmask. | |
4381 | */ | |
a5f8fa9e | 4382 | SYSCALL_DEFINE0(sgetmask) |
1da177e4 LT |
4383 | { |
4384 | /* SMP safe */ | |
4385 | return current->blocked.sig[0]; | |
4386 | } | |
4387 | ||
a5f8fa9e | 4388 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
1da177e4 | 4389 | { |
c1095c6d ON |
4390 | int old = current->blocked.sig[0]; |
4391 | sigset_t newset; | |
1da177e4 | 4392 | |
5ba53ff6 | 4393 | siginitset(&newset, newmask); |
c1095c6d | 4394 | set_current_blocked(&newset); |
1da177e4 LT |
4395 | |
4396 | return old; | |
4397 | } | |
f6187769 | 4398 | #endif /* CONFIG_SGETMASK_SYSCALL */ |
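/*
 * [Editor's illustrative sketch -- not part of kernel/signal.c.  A hedged
 * userspace example; names are invented for illustration.]
 *
 * sgetmask()/ssetmask() above only read and write the first word of the
 * blocked set (current->blocked.sig[0]).  The superseding interface named
 * in the comment, sigprocmask(), handles the full signal mask:
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);

	/* Roughly what ssetmask() did: install a new mask, learn the old one. */
	if (sigprocmask(SIG_SETMASK, &block, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}

	/* Roughly what sgetmask() did: read the current blocked mask. */
	if (sigprocmask(SIG_BLOCK, NULL, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}
	printf("SIGUSR1 blocked: %d\n", sigismember(&old, SIGUSR1));
	return 0;
}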
1da177e4 LT |
4399 | |
4400 | #ifdef __ARCH_WANT_SYS_SIGNAL | |
4401 | /* | |
4402 | * For backwards compatibility. Functionality superseded by sigaction. | |
4403 | */ | |
a5f8fa9e | 4404 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
1da177e4 LT |
4405 | { |
4406 | struct k_sigaction new_sa, old_sa; | |
4407 | int ret; | |
4408 | ||
4409 | new_sa.sa.sa_handler = handler; | |
4410 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | |
c70d3d70 | 4411 | sigemptyset(&new_sa.sa.sa_mask); |
1da177e4 LT |
4412 | |
4413 | ret = do_sigaction(sig, &new_sa, &old_sa); | |
4414 | ||
4415 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | |
4416 | } | |
4417 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | |
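/*
 * [Editor's illustrative sketch -- not part of kernel/signal.c.  A hedged
 * userspace example; function names are invented for illustration.]
 *
 * The SA_ONESHOT | SA_NOMASK flags above give sys_signal() classic
 * System V semantics: the disposition is reset to SIG_DFL when the signal
 * is delivered, and the signal is not blocked while its handler runs.
 * The same behaviour spelled out with the portable sigaction() flags
 * (SA_RESETHAND / SA_NODEFER):
 */
#include <signal.h>

static void once_only(int sig)
{
	(void)sig;
	/* After this handler returns, the disposition is back to SIG_DFL. */
}

static int install_sysv_style(int sig)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = once_only;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESETHAND | SA_NODEFER;	/* SysV signal() semantics */
	return sigaction(sig, &sa, NULL);
}

int main(void)
{
	return install_sysv_style(SIGUSR1) == -1;
}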
4418 | ||
4419 | #ifdef __ARCH_WANT_SYS_PAUSE | |
4420 | ||
a5f8fa9e | 4421 | SYSCALL_DEFINE0(pause) |
1da177e4 | 4422 | { |
d92fcf05 | 4423 | while (!signal_pending(current)) { |
1df01355 | 4424 | __set_current_state(TASK_INTERRUPTIBLE); |
d92fcf05 ON |
4425 | schedule(); |
4426 | } | |
1da177e4 LT |
4427 | return -ERESTARTNOHAND; |
4428 | } | |
4429 | ||
4430 | #endif | |
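/*
 * [Editor's illustrative sketch -- not part of kernel/signal.c.  A hedged
 * userspace example; names are invented for illustration.]
 *
 * pause() returns only after a handled signal; -ERESTARTNOHAND makes the
 * call report EINTR to userspace instead of being restarted.  The classic
 * pitfall is that testing a flag and then calling pause() is not atomic --
 * the signal can arrive in between, which is exactly the race that
 * sigsuspend() below closes:
 */
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void on_usr1(int sig)
{
	(void)sig;
	got_signal = 1;
}

int main(void)
{
	signal(SIGUSR1, on_usr1);

	while (!got_signal)	/* racy: SIGUSR1 may land right here... */
		pause();	/* ...and a lone signal is then waited for forever */
	return 0;
}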
4431 | ||
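/*
 * [Editor's note -- descriptive comment added for clarity; not in the
 * original source.]  The helper below saves the caller's blocked mask in
 * ->saved_sigmask, installs the temporary mask, and sleeps until a signal
 * is pending.  set_restore_sigmask() then marks the task so the saved
 * mask is put back on the return-to-userspace path, once the pending
 * signal has been delivered under the temporary mask.
 */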
9d8a7652 | 4432 | static int sigsuspend(sigset_t *set) |
68f3f16d | 4433 | { |
68f3f16d AV |
4434 | current->saved_sigmask = current->blocked; |
4435 | set_current_blocked(set); | |
4436 | ||
823dd322 SL |
4437 | while (!signal_pending(current)) { |
4438 | __set_current_state(TASK_INTERRUPTIBLE); | |
4439 | schedule(); | |
4440 | } | |
68f3f16d AV |
4441 | set_restore_sigmask(); |
4442 | return -ERESTARTNOHAND; | |
4443 | } | |
68f3f16d | 4444 | |
41c57892 RD |
4445 | /** |
4446 | * sys_rt_sigsuspend - replace the signal mask with the @unewset value | |
4447 | * until a signal is received | |
4448 | * @unewset: new signal mask value | |
4449 | * @sigsetsize: size of sigset_t type | |
4450 | */ | |
d4e82042 | 4451 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
150256d8 DW |
4452 | { |
4453 | sigset_t newset; | |
4454 | ||
4455 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4456 | if (sigsetsize != sizeof(sigset_t)) | |
4457 | return -EINVAL; | |
4458 | ||
4459 | if (copy_from_user(&newset, unewset, sizeof(newset))) | |
4460 | return -EFAULT; | |
68f3f16d | 4461 | return sigsuspend(&newset); |
150256d8 | 4462 | } |
ad4b65a4 AV |
4463 | |
4464 | #ifdef CONFIG_COMPAT | |
4465 | COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) | |
4466 | { | |
ad4b65a4 | 4467 | sigset_t newset; |
ad4b65a4 AV |
4468 | |
4469 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4470 | if (sigsetsize != sizeof(sigset_t)) | |
4471 | return -EINVAL; | |
4472 | ||
3968cf62 | 4473 | if (get_compat_sigset(&newset, unewset)) |
ad4b65a4 | 4474 | return -EFAULT; |
ad4b65a4 | 4475 | return sigsuspend(&newset); |
ad4b65a4 AV |
4476 | } |
4477 | #endif | |
150256d8 | 4478 | |
0a0e8cdf AV |
4479 | #ifdef CONFIG_OLD_SIGSUSPEND |
4480 | SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) | |
4481 | { | |
4482 | sigset_t blocked; | |
4483 | siginitset(&blocked, mask); | |
4484 | return sigsuspend(&blocked); | |
4485 | } | |
4486 | #endif | |
4487 | #ifdef CONFIG_OLD_SIGSUSPEND3 | |
4488 | SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) | |
4489 | { | |
4490 | sigset_t blocked; | |
4491 | siginitset(&blocked, mask); | |
4492 | return sigsuspend(&blocked); | |
4493 | } | |
4494 | #endif | |
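/*
 * [Editor's illustrative sketch -- not part of kernel/signal.c.  A hedged
 * userspace example; names are invented for illustration.]
 *
 * Every sigsuspend entry point above funnels into the static sigsuspend()
 * helper, which swaps the blocked mask and sleeps atomically.  That is
 * what makes the standard race-free wait pattern possible:
 */
#include <signal.h>

static volatile sig_atomic_t got_signal;

static void on_usr1(int sig)
{
	(void)sig;
	got_signal = 1;
}

int main(void)
{
	sigset_t block, orig;
	struct sigaction sa = { 0 };

	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 so it cannot fire between the flag test and the wait. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);

	while (!got_signal)
		sigsuspend(&orig);		/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);	/* restore the original mask */
	return 0;
}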
150256d8 | 4495 | |
52f5684c | 4496 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
f269fdd1 DH |
4497 | { |
4498 | return NULL; | |
4499 | } | |
4500 | ||
ae7795bc | 4501 | static inline void siginfo_buildtime_checks(void) |
1da177e4 | 4502 | { |
aba1be2f | 4503 | BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); |
41b27154 | 4504 | |
ae7795bc EB |
4505 | /* Verify the offsets in the two siginfos match */ |
4506 | #define CHECK_OFFSET(field) \ | |
4507 | BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) | |
4508 | ||
4509 | /* kill */ | |
4510 | CHECK_OFFSET(si_pid); | |
4511 | CHECK_OFFSET(si_uid); | |
4512 | ||
4513 | /* timer */ | |
4514 | CHECK_OFFSET(si_tid); | |
4515 | CHECK_OFFSET(si_overrun); | |
4516 | CHECK_OFFSET(si_value); | |
4517 | ||
4518 | /* rt */ | |
4519 | CHECK_OFFSET(si_pid); | |
4520 | CHECK_OFFSET(si_uid); | |
4521 | CHECK_OFFSET(si_value); | |
4522 | ||
4523 | /* sigchld */ | |
4524 | CHECK_OFFSET(si_pid); | |
4525 | CHECK_OFFSET(si_uid); | |
4526 | CHECK_OFFSET(si_status); | |
4527 | CHECK_OFFSET(si_utime); | |
4528 | CHECK_OFFSET(si_stime); | |
4529 | ||
4530 | /* sigfault */ | |
4531 | CHECK_OFFSET(si_addr); | |
4532 | CHECK_OFFSET(si_addr_lsb); | |
4533 | CHECK_OFFSET(si_lower); | |
4534 | CHECK_OFFSET(si_upper); | |
4535 | CHECK_OFFSET(si_pkey); | |
4536 | ||
4537 | /* sigpoll */ | |
4538 | CHECK_OFFSET(si_band); | |
4539 | CHECK_OFFSET(si_fd); | |
4540 | ||
4541 | /* sigsys */ | |
4542 | CHECK_OFFSET(si_call_addr); | |
4543 | CHECK_OFFSET(si_syscall); | |
4544 | CHECK_OFFSET(si_arch); | |
4545 | #undef CHECK_OFFSET | |
70f1b0d3 EB |
4546 | |
4547 | /* usb asyncio */ | |
4548 | BUILD_BUG_ON(offsetof(struct siginfo, si_pid) != | |
4549 | offsetof(struct siginfo, si_addr)); | |
4550 | if (sizeof(int) == sizeof(void __user *)) { | |
4551 | BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != | |
4552 | sizeof(void __user *)); | |
4553 | } else { | |
4554 | BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + | |
4555 | sizeof_field(struct siginfo, si_uid)) != | |
4556 | sizeof(void __user *)); | |
4557 | BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != | |
4558 | offsetof(struct siginfo, si_uid)); | |
4559 | } | |
4560 | #ifdef CONFIG_COMPAT | |
4561 | BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != | |
4562 | offsetof(struct compat_siginfo, si_addr)); | |
4563 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != | |
4564 | sizeof(compat_uptr_t)); | |
4565 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != | |
4566 | sizeof_field(struct siginfo, si_pid)); | |
4567 | #endif | |
ae7795bc EB |
4568 | } |
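/*
 * [Editor's illustrative sketch -- not part of kernel/signal.c.  The
 * struct names below are hypothetical and exist only to demonstrate the
 * technique.]
 *
 * The CHECK_OFFSET()/BUILD_BUG_ON() pairs above turn siginfo layout
 * mismatches into compile-time errors.  The same idea in plain C11 uses
 * _Static_assert with offsetof:
 */
#include <stddef.h>

struct wire_v1 { int id; int flags; };
struct wire_v2 { int id; int flags; long extra; };

/* Both layouts must agree on the fields they share. */
_Static_assert(offsetof(struct wire_v1, id) == offsetof(struct wire_v2, id),
	       "id offset mismatch");
_Static_assert(offsetof(struct wire_v1, flags) == offsetof(struct wire_v2, flags),
	       "flags offset mismatch");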
4569 | ||
4570 | void __init signals_init(void) | |
4571 | { | |
4572 | siginfo_buildtime_checks(); | |
4573 | ||
0a31bd5f | 4574 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
1da177e4 | 4575 | } |
67fc4e0c JW |
4576 | |
4577 | #ifdef CONFIG_KGDB_KDB | |
4578 | #include <linux/kdb.h> | |
4579 | /* | |
0b44bf9a | 4580 | * kdb_send_sig - Allows kdb to send signals without exposing |
67fc4e0c JW |
4581 | * signal internals. This function checks if the required locks are |
4582 | * available before calling the main signal code, to avoid kdb | |
4583 | * deadlocks. | |
4584 | */ | |
0b44bf9a | 4585 | void kdb_send_sig(struct task_struct *t, int sig) |
67fc4e0c JW |
4586 | { |
4587 | static struct task_struct *kdb_prev_t; | |
0b44bf9a | 4588 | int new_t, ret; |
67fc4e0c JW |
4589 | if (!spin_trylock(&t->sighand->siglock)) { |
4590 | kdb_printf("Can't do kill command now.\n" | |
4591 | "The sigmask lock is held somewhere else in " | |
4592 | "kernel, try again later\n"); | |
4593 | return; | |
4594 | } | |
67fc4e0c JW |
4595 | new_t = kdb_prev_t != t; |
4596 | kdb_prev_t = t; | |
4597 | if (t->state != TASK_RUNNING && new_t) { | |
0b44bf9a | 4598 | spin_unlock(&t->sighand->siglock); |
67fc4e0c JW |
4599 | kdb_printf("Process is not RUNNING, sending a signal from " |
4600 | "kdb risks deadlock\n" | |
4601 | "on the run queue locks. " | |
4602 | "The signal has _not_ been sent.\n" | |
4603 | "Reissue the kill command if you want to risk " | |
4604 | "the deadlock.\n"); | |
4605 | return; | |
4606 | } | |
b213984b | 4607 | ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); |
0b44bf9a EB |
4608 | spin_unlock(&t->sighand->siglock); |
4609 | if (ret) | |
67fc4e0c JW |
4610 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
4611 | sig, t->pid); | |
4612 | else | |
4613 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); | |
4614 | } | |
4615 | #endif /* CONFIG_KGDB_KDB */ |