/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;


static int sig_ignored(struct task_struct *t, int sig)
{
        void __user *handler;

        /*
         * Tracers always want to know about signals..
         */
        if (t->ptrace & PT_PTRACED)
                return 0;

        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig))
                return 0;

        /* Is it explicitly or implicitly ignored? */
        handler = t->sighand->action[sig-1].sa.sa_handler;
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
        else
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
        recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}
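
/*
 * Worked example (added for illustration, not in the original file):
 * with 64-bit words, suppose SIGUSR1 (10) and SIGTERM (15) are pending
 * and SIGUSR1 is blocked.  Bit 9 and bit 14 are set in *s, bit 9 in *m,
 * so x = *s &~ *m has only bit 14 set; ffz(~x) returns 14 and
 * next_signal() reports 15.  The lowest-numbered deliverable signal wins.
 */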

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * In order to avoid problems with "switch_user()", we want to make
         * sure that the compiler doesn't re-load "t->user"
         */
        user = t->user;
        barrier();
        atomic_inc(&user->sigpending);
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = get_uid(user);
        }
        return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
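
/*
 * Illustrative sketch (not in the original file) of how a driver might
 * use the notifier pair above; the callback name and private struct are
 * hypothetical.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	// non-zero: deliver after all
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	// ...critical section...
 *	unblock_all_signals();
 */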

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;
        int still_pending = 0;

        if (unlikely(!sigismember(&list->signal, sig)))
                return 0;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first) {
                                still_pending = 1;
                                break;
                        }
                        first = q;
                }
        }
        if (first) {
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
                if (!still_pending)
                        sigdelset(&list->signal, sig);
        } else {

                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
        return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, pending, info))
                        sig = 0;
        }

        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavily loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }
        recalc_sigpending_tsk(tsk);
        if (signr && unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if (signr &&
            ((info->si_code & __SI_MASK) == __SI_TIMER) &&
            info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
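
/*
 * Illustrative sketch (not in the original file) of the caller pattern
 * the comment above requires: a delivery loop takes the siglock, dequeues,
 * and drops the lock before acting on the signal.
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		// ...act on signr and info...
 */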

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced case.
         * We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_STOPPED | TASK_TRACED;
        if (!wake_up_state(t, mask))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        int error = -EINVAL;
        if (!valid_signal(sig))
                return error;
        error = -EPERM;
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && ((sig != SIGCONT) ||
                (process_session(current) != process_session(t)))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                return error;

        error = security_task_kill(t, info, sig, 0);
        if (!error)
                audit_signal_info(sig, t); /* Let audit system see the signal */
        return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
        struct task_struct *t;

        if (p->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * The process is in the middle of dying already.
                 */
                return;

        if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                        t = next_thread(t);
                } while (t != p);
        } else if (sig == SIGCONT) {
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                if (unlikely(p->signal->group_stop_count > 0)) {
                        /*
                         * There was a group stop in progress.  We'll
                         * pretend it finished before we got here.  We are
                         * obliged to report it to the parent: if the
                         * SIGSTOP happened "after" this SIGCONT, then it
                         * would have cleared this pending SIGCONT.  If it
                         * happened "before" this SIGCONT, then the parent
                         * got the SIGCHLD about the stop finishing before
                         * the continue happened.  We do the notification
                         * now, and it's as if the stop had finished and
                         * the SIGCHLD was pending on entry to this kill.
                         */
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);

                        t = next_thread(t);
                } while (t != p);

                if (p->signal->flags & SIGNAL_STOP_STOPPED) {
                        /*
                         * We were in fact stopped, and are now continued.
                         * Notify the parent with CLD_CONTINUED.
                         */
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        p->signal->flags = 0;
                }
        } else if (sig == SIGKILL) {
                /*
                 * Make sure that any pending stop signal already dequeued
                 * is undone by the wakeup for SIGKILL.
                 */
                p->signal->flags = 0;
        }
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        struct sigpending *signals)
{
        struct sigqueue *q = NULL;
        int ret = 0;

        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = current->pid;
                        q->info.si_uid = current->uid;
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER)
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                        return -EAGAIN;
        }

out_set:
        sigaddset(&signals->signal, sig);
        return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        int ret = 0;

        BUG_ON(!irqs_disabled());
        assert_spin_locked(&t->sighand->siglock);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(t, sig))
                goto out;

        /* Support queueing exactly one non-rt signal, so that we
           can get more detailed information about the cause of
           the signal. */
        if (LEGACY_QUEUE(&t->pending, sig))
                goto out;

        ret = send_signal(sig, info, t, &t->pending);
        if (!ret && !sigismember(&t->blocked, sig))
                signal_wake_up(t, sig == SIGKILL);
out:
        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_tsk(t);
                }
        }
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
        force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (p->state & (TASK_STOPPED | TASK_TRACED))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = p->signal->curr_target;
                if (t == NULL)
                        /* restart balancing at this thread */
                        t = p->signal->curr_target = p;

                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                p->signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        p->signal->flags = SIGNAL_GROUP_EXIT;
                        p->signal->group_exit_code = sig;
                        p->signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                                t = next_thread(t);
                        } while (t != p);
                        return;
                }

                /*
                 * There will be a core dump.  We make all threads other
                 * than the chosen one go into a group stop so that nothing
                 * happens until it gets scheduled, takes the signal off
                 * the shared queue, and does the core dump.  This is a
                 * little more complicated than strictly necessary, but it
                 * keeps the signal state that winds up in the core dump
                 * unchanged from the death state, e.g. which thread had
                 * the core-dump signal unblocked.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                p->signal->group_stop_count = 0;
                p->signal->group_exit_task = t;
                t = p;
                do {
                        p->signal->group_stop_count++;
                        signal_wake_up(t, 0);
                        t = next_thread(t);
                } while (t != p);
                wake_up_process(p->signal->group_exit_task);
                return;
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret = 0;

        assert_spin_locked(&p->sighand->siglock);
        handle_stop_signal(sig, p);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig))
                return ret;

        if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
                /* This is a non-RT signal and we already have one queued.  */
                return ret;

        /*
         * Put this signal on the shared-pending queue, or fail with EAGAIN.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
        ret = send_signal(sig, info, p, &p->signal->shared_pending);
        if (unlikely(ret))
                return ret;

        __group_complete_signal(sig, p);
        return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
        struct task_struct *t;

        p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;

        if (thread_group_empty(p))
                return;

        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
                 */
                if (t->exit_state)
                        continue;

                /*
                 * We don't want to notify the parent, since we are
                 * killed as part of a thread group due to another
                 * thread doing an execve() or similar. So set the
                 * exit signal to -1 to allow immediate reaping of
                 * the process.  But don't detach the thread group
                 * leader.
                 */
                if (t != p->group_leader)
                        t->exit_signal = -1;

                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
        struct sighand_struct *sighand;

        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }

        return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        unsigned long flags;
        int ret;

        ret = check_kill_permission(sig, info, p);

        if (!ret && sig) {
                ret = -ESRCH;
                if (lock_task_sighand(p, &flags)) {
                        ret = __group_send_sig_info(sig, info, p);
                        unlock_task_sighand(p, &flags);
                }
        }

        return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = __kill_pgrp_info(sig, info, pgrp);
        read_unlock(&tasklist_lock);

        return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error;
        struct task_struct *p;

        rcu_read_lock();
        if (unlikely(sig_needs_tasklist(sig)))
                read_lock(&tasklist_lock);

        p = pid_task(pid, PIDTYPE_PID);
        error = -ESRCH;
        if (p)
                error = group_send_sig_info(sig, info, p);

        if (unlikely(sig_needs_tasklist(sig)))
                read_unlock(&tasklist_lock);
        rcu_read_unlock();
        return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        rcu_read_lock();
        error = kill_pid_info(sig, info, find_pid(pid));
        rcu_read_unlock();
        return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                      uid_t uid, uid_t euid, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;

        if (!valid_signal(sig))
                return ret;

        read_lock(&tasklist_lock);
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && (euid != p->suid) && (euid != p->uid)
            && (uid != p->suid) && (uid != p->uid)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
        if (sig && p->sighand) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                ret = __group_send_sig_info(sig, info, p);
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
out_unlock:
        read_unlock(&tasklist_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        int ret;
        rcu_read_lock();
        if (!pid) {
                ret = kill_pgrp_info(sig, info, task_pgrp(current));
        } else if (pid == -1) {
                int retval = 0, count = 0;
                struct task_struct * p;

                read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->pid > 1 && p->tgid != current->tgid) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
                ret = count ? retval : -ESRCH;
        } else if (pid < 0) {
                ret = kill_pgrp_info(sig, info, find_pid(-pid));
        } else {
                ret = kill_pid_info(sig, info, find_pid(pid));
        }
        rcu_read_unlock();
        return ret;
}
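
/*
 * Summary of the pid cases handled above, mirroring kill(2):
 *	pid > 0:   signal the process with that pid
 *	pid == 0:  signal every process in the caller's process group
 *	pid == -1: signal every process except init (pid 1) and ourselves
 *	pid < -1:  signal every process in the group with pgid == -pid
 */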

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        unsigned long flags;

        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        /*
         * We need the tasklist lock even for the specific
         * thread case (when we don't need to follow the group
         * lists) in order to avoid races with "p->sighand"
         * going away or changing from under us.
         */
        read_lock(&tasklist_lock);
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
        return ret;
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}
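
/*
 * For example, send_sig(SIGTERM, p, 1) expands to
 * send_sig_info(SIGTERM, SEND_SIG_PRIV, p): send_signal() then marks the
 * signal as kernel-generated (si_code SI_KERNEL, pid/uid 0), while
 * priv == 0 sends SEND_SIG_NOINFO, filled in as if from the current
 * user (si_code SI_USER with current's pid and uid).
 */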

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        read_lock(&tasklist_lock);
        ret = group_send_sig_info(sig, info, p);
        read_unlock(&tasklist_lock);
        return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
        return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q;

        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return(q);
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * If the signal is still pending remove it from the
         * pending queue.
         */
        if (unlikely(!list_empty(&q->list))) {
                spinlock_t *lock = &current->sighand->siglock;
                read_lock(&tasklist_lock);
                spin_lock_irqsave(lock, flags);
                if (!list_empty(&q->list))
                        list_del_init(&q->list);
                spin_unlock_irqrestore(lock, flags);
                read_unlock(&tasklist_lock);
        }
        q->flags &= ~SIGQUEUE_PREALLOC;
        __sigqueue_free(q);
}
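
/*
 * Illustrative sketch (not in the original file) of the preallocated
 * sigqueue lifecycle the comment above describes for POSIX timers;
 * error handling and locking context are elided.
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create()
 *	if (!q)
 *		return -EAGAIN;			// report failure up front
 *	// ...
 *	send_sigqueue(sig, q, task);		// at each timer expiry
 *	// ...
 *	sigqueue_free(q);			// at timer_delete()
 */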

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
        unsigned long flags;
        int ret = 0;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        /*
         * The rcu based delayed sighand destroy makes it possible to
         * run this without tasklist lock held. The task struct itself
         * cannot go away as create_timer did get_task_struct().
         *
         * We return -1, when the task is marked exiting, so
         * posix_timer_event can redirect it to the group leader
         */
        rcu_read_lock();

        if (!likely(lock_task_sighand(p, &flags))) {
                ret = -1;
                goto out_err;
        }

        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }

        list_add_tail(&q->list, &p->pending.list);
        sigaddset(&p->pending.signal, sig);
        if (!sigismember(&p->blocked, sig))
                signal_wake_up(p, sig == SIGKILL);

out:
        unlock_task_sighand(p, &flags);
out_err:
        rcu_read_unlock();

        return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
        unsigned long flags;
        int ret = 0;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        read_lock(&tasklist_lock);
        /* Since it_lock is held, p->sighand cannot be NULL. */
        spin_lock_irqsave(&p->sighand->siglock, flags);
        handle_stop_signal(sig, p);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }

        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.  Other uses should not try to
                 * send the signal multiple times.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }

        /*
         * Put this signal on the shared-pending queue.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
        list_add_tail(&q->list, &p->signal->shared_pending.list);
        sigaddset(&p->signal->shared_pending.signal, sig);

        __group_complete_signal(sig, p);
out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
        return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
{
        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
                                                       tsk->signal->utime));
        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
                                                       tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (tsk->ptrace & PT_PTRACED)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(tsk->utime);
        info.si_stime = cputime_to_jiffies(tsk->stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}
1471 | ||
d5f70c00 ON |
1472 | static inline int may_ptrace_stop(void) |
1473 | { | |
1474 | if (!likely(current->ptrace & PT_PTRACED)) | |
1475 | return 0; | |
1476 | ||
1477 | if (unlikely(current->parent == current->real_parent && | |
1478 | (current->ptrace & PT_ATTACHED))) | |
1479 | return 0; | |
1480 | ||
1481 | if (unlikely(current->signal == current->parent->signal) && | |
1482 | unlikely(current->signal->flags & SIGNAL_GROUP_EXIT)) | |
1483 | return 0; | |
1484 | ||
1485 | /* | |
1486 | * Are we in the middle of do_coredump? | |
1487 | * If so and our tracer is also part of the coredump stopping | |
1488 | * is a deadlock situation, and pointless because our tracer | |
1489 | * is dead so don't allow us to stop. | |
1490 | * If SIGKILL was already sent before the caller unlocked | |
1491 | * ->siglock we must see ->core_waiters != 0. Otherwise it | |
1492 | * is safe to enter schedule(). | |
1493 | */ | |
1494 | if (unlikely(current->mm->core_waiters) && | |
1495 | unlikely(current->mm == current->parent->mm)) | |
1496 | return 0; | |
1497 | ||
1498 | return 1; | |
1499 | } | |
1500 | ||
1da177e4 LT |
1501 | /* |
1502 | * This must be called with current->sighand->siglock held. | |
1503 | * | |
1504 | * This should be the path for all ptrace stops. | |
1505 | * We always set current->last_siginfo while stopped here. | |
1506 | * That makes it a way to test a stopped process for | |
1507 | * being ptrace-stopped vs being job-control-stopped. | |
1508 | * | |
1509 | * If we actually decide not to stop at all because the tracer is gone, | |
1510 | * we leave nostop_code in current->exit_code. | |
1511 | */ | |
1512 | static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) | |
1513 | { | |
1514 | /* | |
1515 | * If there is a group stop in progress, | |
1516 | * we must participate in the bookkeeping. | |
1517 | */ | |
1518 | if (current->signal->group_stop_count > 0) | |
1519 | --current->signal->group_stop_count; | |
1520 | ||
1521 | current->last_siginfo = info; | |
1522 | current->exit_code = exit_code; | |
1523 | ||
1524 | /* Let the debugger run. */ | |
1525 | set_current_state(TASK_TRACED); | |
1526 | spin_unlock_irq(¤t->sighand->siglock); | |
85b6bce3 | 1527 | try_to_freeze(); |
1da177e4 | 1528 | read_lock(&tasklist_lock); |
d5f70c00 | 1529 | if (may_ptrace_stop()) { |
a1d5e21e | 1530 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
1da177e4 LT |
1531 | read_unlock(&tasklist_lock); |
1532 | schedule(); | |
1533 | } else { | |
1534 | /* | |
1535 | * By the time we got the lock, our tracer went away. | |
1536 | * Don't stop here. | |
1537 | */ | |
1538 | read_unlock(&tasklist_lock); | |
1539 | set_current_state(TASK_RUNNING); | |
1540 | current->exit_code = nostop_code; | |
1541 | } | |
1542 | ||
1543 | /* | |
1544 | * We are back. Now reacquire the siglock before touching | |
1545 | * last_siginfo, so that we are sure to have synchronized with | |
1546 | * any signal-sending on another CPU that wants to examine it. | |
1547 | */ | |
1548 | spin_lock_irq(¤t->sighand->siglock); | |
1549 | current->last_siginfo = NULL; | |
1550 | ||
1551 | /* | |
1552 | * Queued signals ignored us while we were stopped for tracing. | |
1553 | * So check for any that we should take before resuming user mode. | |
1554 | */ | |
1555 | recalc_sigpending(); | |
1556 | } | |
1557 | ||
1558 | void ptrace_notify(int exit_code) | |
1559 | { | |
1560 | siginfo_t info; | |
1561 | ||
1562 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
1563 | ||
1564 | memset(&info, 0, sizeof info); | |
1565 | info.si_signo = SIGTRAP; | |
1566 | info.si_code = exit_code; | |
1567 | info.si_pid = current->pid; | |
1568 | info.si_uid = current->uid; | |
1569 | ||
1570 | /* Let the debugger run. */ | |
1571 | spin_lock_irq(¤t->sighand->siglock); | |
1572 | ptrace_stop(exit_code, 0, &info); | |
1573 | spin_unlock_irq(&current->sighand->siglock); | |
1574 | } | |
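The BUG_ON() above pins down the calling convention: the low seven bits of exit_code must be SIGTRAP, and nothing above bit 15 may be set. A minimal sketch of a conforming caller, modelled on how do_fork() reports ptrace events (the helper name is illustrative):

#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/signal.h>

/* Sketch: report a fork event to the tracer as (event << 8) | SIGTRAP,
 * the encoding ptrace_notify()'s BUG_ON() requires. */
static void report_fork_to_tracer(void)
{
        if (current->ptrace & PT_TRACE_FORK)
                ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
}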
1575 | ||
1da177e4 LT |
1576 | static void |
1577 | finish_stop(int stop_count) | |
1578 | { | |
1579 | /* | |
1580 | * If there are no other threads in the group, or if there is | |
1581 | * a group stop in progress and we are the last to stop, | |
1582 | * report to the parent. When ptraced, every thread reports itself. | |
1583 | */ | |
a1d5e21e ON |
1584 | if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { |
1585 | read_lock(&tasklist_lock); | |
1586 | do_notify_parent_cldstop(current, CLD_STOPPED); | |
1587 | read_unlock(&tasklist_lock); | |
1588 | } | |
bc505a47 | 1589 | |
3df494a3 RW |
1590 | do { |
1591 | schedule(); | |
1592 | } while (try_to_freeze()); | |
1da177e4 LT |
1593 | /* |
1594 | * Now we don't run again until continued. | |
1595 | */ | |
1596 | current->exit_code = 0; | |
1597 | } | |
1598 | ||
1599 | /* | |
1600 | * This performs the stopping for SIGSTOP and other stop signals. | |
1601 | * We have to stop all threads in the thread group. | |
1602 | * Returns nonzero if we've actually stopped and released the siglock. | |
1603 | * Returns zero if we didn't stop and still hold the siglock. | |
1604 | */ | |
a122b341 | 1605 | static int do_signal_stop(int signr) |
1da177e4 LT |
1606 | { |
1607 | struct signal_struct *sig = current->signal; | |
dac27f4a | 1608 | int stop_count; |
1da177e4 LT |
1609 | |
1610 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) | |
1611 | return 0; | |
1612 | ||
1613 | if (sig->group_stop_count > 0) { | |
1614 | /* | |
1615 | * There is a group stop in progress. We don't need to | |
1616 | * start another one. | |
1617 | */ | |
1da177e4 | 1618 | stop_count = --sig->group_stop_count; |
dac27f4a | 1619 | } else { |
1da177e4 LT |
1620 | /* |
1621 | * There is no group stop already in progress. | |
a122b341 | 1622 | * We must initiate one now. |
1da177e4 LT |
1623 | */ |
1624 | struct task_struct *t; | |
1625 | ||
a122b341 | 1626 | sig->group_exit_code = signr; |
1da177e4 | 1627 | |
a122b341 ON |
1628 | stop_count = 0; |
1629 | for (t = next_thread(current); t != current; t = next_thread(t)) | |
1da177e4 | 1630 | /* |
a122b341 ON |
1631 | * Setting state to TASK_STOPPED for a group |
1632 | * stop is always done with the siglock held, | |
1633 | * so this check has no races. | |
1da177e4 | 1634 | */ |
a122b341 ON |
1635 | if (!t->exit_state && |
1636 | !(t->state & (TASK_STOPPED|TASK_TRACED))) { | |
1637 | stop_count++; | |
1638 | signal_wake_up(t, 0); | |
1639 | } | |
1640 | sig->group_stop_count = stop_count; | |
1da177e4 LT |
1641 | } |
1642 | ||
dac27f4a ON |
1643 | if (stop_count == 0) |
1644 | sig->flags = SIGNAL_STOP_STOPPED; | |
1645 | current->exit_code = sig->group_exit_code; | |
1646 | __set_current_state(TASK_STOPPED); | |
1647 | ||
1648 | spin_unlock_irq(&current->sighand->siglock); | |
1da177e4 LT |
1649 | finish_stop(stop_count); |
1650 | return 1; | |
1651 | } | |
1652 | ||
1653 | /* | |
1654 | * Do appropriate magic when group_stop_count > 0. | |
1655 | * We return nonzero if we stopped, after releasing the siglock. | |
1656 | * We return zero if we still hold the siglock and should look | |
1657 | * for another signal without checking group_stop_count again. | |
1658 | */ | |
858119e1 | 1659 | static int handle_group_stop(void) |
1da177e4 LT |
1660 | { |
1661 | int stop_count; | |
1662 | ||
1663 | if (current->signal->group_exit_task == current) { | |
1664 | /* | |
1665 | * Group stop is in effect so we can do a core dump. | |
1666 | * We are the initiating thread, so get on with it. | |
1667 | */ | |
1668 | current->signal->group_exit_task = NULL; | |
1669 | return 0; | |
1670 | } | |
1671 | ||
1672 | if (current->signal->flags & SIGNAL_GROUP_EXIT) | |
1673 | /* | |
1674 | * Group stop is in effect so another thread can do a core dump, | |
1675 | * or else we are racing against a death signal. | |
1676 | * Just punt the stop so we can get the next signal. | |
1677 | */ | |
1678 | return 0; | |
1679 | ||
1680 | /* | |
1681 | * There is a group stop in progress. We stop | |
1682 | * without any associated signal being in our queue. | |
1683 | */ | |
1684 | stop_count = --current->signal->group_stop_count; | |
1685 | if (stop_count == 0) | |
1686 | current->signal->flags = SIGNAL_STOP_STOPPED; | |
1687 | current->exit_code = current->signal->group_exit_code; | |
1688 | set_current_state(TASK_STOPPED); | |
1689 | spin_unlock_irq(&current->sighand->siglock); | |
1690 | finish_stop(stop_count); | |
1691 | return 1; | |
1692 | } | |
1693 | ||
1694 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | |
1695 | struct pt_regs *regs, void *cookie) | |
1696 | { | |
1697 | sigset_t *mask = &current->blocked; | |
1698 | int signr = 0; | |
1699 | ||
fc558a74 RW |
1700 | try_to_freeze(); |
1701 | ||
1da177e4 LT |
1702 | relock: |
1703 | spin_lock_irq(&current->sighand->siglock); | |
1704 | for (;;) { | |
1705 | struct k_sigaction *ka; | |
1706 | ||
1707 | if (unlikely(current->signal->group_stop_count > 0) && | |
1708 | handle_group_stop()) | |
1709 | goto relock; | |
1710 | ||
1711 | signr = dequeue_signal(current, mask, info); | |
1712 | ||
1713 | if (!signr) | |
1714 | break; /* will return 0 */ | |
1715 | ||
1716 | if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { | |
1717 | ptrace_signal_deliver(regs, cookie); | |
1718 | ||
1719 | /* Let the debugger run. */ | |
1720 | ptrace_stop(signr, signr, info); | |
1721 | ||
e57a5059 | 1722 | /* We're back. Did the debugger cancel the sig? */ |
1da177e4 | 1723 | signr = current->exit_code; |
e57a5059 | 1724 | if (signr == 0) |
1da177e4 LT |
1725 | continue; |
1726 | ||
1727 | current->exit_code = 0; | |
1728 | ||
1729 | /* Update the siginfo structure if the signal has | |
1730 | changed. If the debugger wanted something | |
1731 | specific in the siginfo structure then it should | |
1732 | have updated *info via PTRACE_SETSIGINFO. */ | |
1733 | if (signr != info->si_signo) { | |
1734 | info->si_signo = signr; | |
1735 | info->si_errno = 0; | |
1736 | info->si_code = SI_USER; | |
1737 | info->si_pid = current->parent->pid; | |
1738 | info->si_uid = current->parent->uid; | |
1739 | } | |
1740 | ||
1741 | /* If the (new) signal is now blocked, requeue it. */ | |
1742 | if (sigismember(&current->blocked, signr)) { | |
1743 | specific_send_sig_info(signr, info, current); | |
1744 | continue; | |
1745 | } | |
1746 | } | |
1747 | ||
1748 | ka = &current->sighand->action[signr-1]; | |
1749 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | |
1750 | continue; | |
1751 | if (ka->sa.sa_handler != SIG_DFL) { | |
1752 | /* Run the handler. */ | |
1753 | *return_ka = *ka; | |
1754 | ||
1755 | if (ka->sa.sa_flags & SA_ONESHOT) | |
1756 | ka->sa.sa_handler = SIG_DFL; | |
1757 | ||
1758 | break; /* will return non-zero "signr" value */ | |
1759 | } | |
1760 | ||
1761 | /* | |
1762 | * Now we are doing the default action for this signal. | |
1763 | */ | |
1764 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
1765 | continue; | |
1766 | ||
84d73786 SB |
1767 | /* |
1768 | * Init of a pid space gets no signals it doesn't want from | |
1769 | * within that pid space. It can of course get signals from | |
1770 | * its parent pid space. | |
1771 | */ | |
1772 | if (current == child_reaper(current)) | |
1da177e4 LT |
1773 | continue; |
1774 | ||
1775 | if (sig_kernel_stop(signr)) { | |
1776 | /* | |
1777 | * The default action is to stop all threads in | |
1778 | * the thread group. The job control signals | |
1779 | * do nothing in an orphaned pgrp, but SIGSTOP | |
1780 | * always works. Note that siglock needs to be | |
1781 | * dropped during the call to is_orphaned_pgrp() | |
1782 | * because of lock ordering with tasklist_lock. | |
1783 | * This allows an intervening SIGCONT to be posted. | |
1784 | * We need to check for that and bail out if necessary. | |
1785 | */ | |
1786 | if (signr != SIGSTOP) { | |
1787 | spin_unlock_irq(&current->sighand->siglock); | |
1788 | ||
1789 | /* signals can be posted during this window */ | |
1790 | ||
3e7cd6c4 | 1791 | if (is_current_pgrp_orphaned()) |
1da177e4 LT |
1792 | goto relock; |
1793 | ||
1794 | spin_lock_irq(&current->sighand->siglock); | |
1795 | } | |
1796 | ||
1797 | if (likely(do_signal_stop(signr))) { | |
1798 | /* It released the siglock. */ | |
1799 | goto relock; | |
1800 | } | |
1801 | ||
1802 | /* | |
1803 | * We didn't actually stop, due to a race | |
1804 | * with SIGCONT or something like that. | |
1805 | */ | |
1806 | continue; | |
1807 | } | |
1808 | ||
1809 | spin_unlock_irq(&current->sighand->siglock); | |
1810 | ||
1811 | /* | |
1812 | * Anything else is fatal, maybe with a core dump. | |
1813 | */ | |
1814 | current->flags |= PF_SIGNALED; | |
1815 | if (sig_kernel_coredump(signr)) { | |
1816 | /* | |
1817 | * If it was able to dump core, this kills all | |
1818 | * other threads in the group and synchronizes with | |
1819 | * their demise. If we lost the race with another | |
1820 | * thread getting here, it set group_exit_code | |
1821 | * first and our do_group_exit call below will use | |
1822 | * that value and ignore the one we pass it. | |
1823 | */ | |
1824 | do_coredump((long)signr, signr, regs); | |
1825 | } | |
1826 | ||
1827 | /* | |
1828 | * Death signals, no core dump. | |
1829 | */ | |
1830 | do_group_exit(signr); | |
1831 | /* NOTREACHED */ | |
1832 | } | |
1833 | spin_unlock_irq(&current->sighand->siglock); | |
1834 | return signr; | |
1835 | } | |
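A hypothetical sketch of the arch-side consumer of this function, modelled on the per-arch do_signal() loops of this era; handle_signal() here stands in for the arch-specific frame setup and is not defined in this file:

#include <linux/sched.h>
#include <linux/signal.h>

static void do_signal(struct pt_regs *regs)
{
        struct k_sigaction ka;
        siginfo_t info;
        int signr;

        /* Drain one deliverable signal; zero means nothing to do. */
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0)
                handle_signal(signr, &info, &ka, regs); /* hypothetical */
}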
1836 | ||
1da177e4 LT |
1837 | EXPORT_SYMBOL(recalc_sigpending); |
1838 | EXPORT_SYMBOL_GPL(dequeue_signal); | |
1839 | EXPORT_SYMBOL(flush_signals); | |
1840 | EXPORT_SYMBOL(force_sig); | |
1da177e4 LT |
1841 | EXPORT_SYMBOL(kill_proc); |
1842 | EXPORT_SYMBOL(ptrace_notify); | |
1843 | EXPORT_SYMBOL(send_sig); | |
1844 | EXPORT_SYMBOL(send_sig_info); | |
1845 | EXPORT_SYMBOL(sigprocmask); | |
1846 | EXPORT_SYMBOL(block_all_signals); | |
1847 | EXPORT_SYMBOL(unblock_all_signals); | |
1848 | ||
1849 | ||
1850 | /* | |
1851 | * System call entry points. | |
1852 | */ | |
1853 | ||
1854 | asmlinkage long sys_restart_syscall(void) | |
1855 | { | |
1856 | struct restart_block *restart = &current_thread_info()->restart_block; | |
1857 | return restart->fn(restart); | |
1858 | } | |
1859 | ||
1860 | long do_no_restart_syscall(struct restart_block *param) | |
1861 | { | |
1862 | return -EINTR; | |
1863 | } | |
1864 | ||
1865 | /* | |
1866 | * We don't need to get the kernel lock - this is all local to this | |
1867 | * particular thread. (And that's good, because this is _heavily_ | |
1868 | * used by various programs.) | |
1869 | */ | |
1870 | ||
1871 | /* | |
1872 | * This is also useful for kernel threads that want to temporarily | |
1873 | * (or permanently) block certain signals. | |
1874 | * | |
1875 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
1876 | * interface happily blocks "unblockable" signals like SIGKILL | |
1877 | * and friends. | |
1878 | */ | |
1879 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
1880 | { | |
1881 | int error; | |
1da177e4 LT |
1882 | |
1883 | spin_lock_irq(&current->sighand->siglock); | |
a26fd335 ON |
1884 | if (oldset) |
1885 | *oldset = current->blocked; | |
1886 | ||
1da177e4 LT |
1887 | error = 0; |
1888 | switch (how) { | |
1889 | case SIG_BLOCK: | |
1890 | sigorsets(&current->blocked, &current->blocked, set); | |
1891 | break; | |
1892 | case SIG_UNBLOCK: | |
1893 | signandsets(&current->blocked, &current->blocked, set); | |
1894 | break; | |
1895 | case SIG_SETMASK: | |
1896 | current->blocked = *set; | |
1897 | break; | |
1898 | default: | |
1899 | error = -EINVAL; | |
1900 | } | |
1901 | recalc_sigpending(); | |
1902 | spin_unlock_irq(&current->sighand->siglock); | |
a26fd335 | 1903 | |
1da177e4 LT |
1904 | return error; |
1905 | } | |
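A hypothetical kernel-thread helper using this interface; as the comment above warns, and unlike the userspace syscall, it will happily block SIGKILL as well:

#include <linux/sched.h>
#include <linux/signal.h>

static void kthread_block_everything(void)
{
        sigset_t all;

        sigfillset(&all);                   /* includes SIGKILL/SIGSTOP */
        sigprocmask(SIG_BLOCK, &all, NULL); /* kernel side: no filtering */
}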
1906 | ||
1907 | asmlinkage long | |
1908 | sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) | |
1909 | { | |
1910 | int error = -EINVAL; | |
1911 | sigset_t old_set, new_set; | |
1912 | ||
1913 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
1914 | if (sigsetsize != sizeof(sigset_t)) | |
1915 | goto out; | |
1916 | ||
1917 | if (set) { | |
1918 | error = -EFAULT; | |
1919 | if (copy_from_user(&new_set, set, sizeof(*set))) | |
1920 | goto out; | |
1921 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
1922 | ||
1923 | error = sigprocmask(how, &new_set, &old_set); | |
1924 | if (error) | |
1925 | goto out; | |
1926 | if (oset) | |
1927 | goto set_old; | |
1928 | } else if (oset) { | |
1929 | spin_lock_irq(&current->sighand->siglock); | |
1930 | old_set = current->blocked; | |
1931 | spin_unlock_irq(&current->sighand->siglock); | |
1932 | ||
1933 | set_old: | |
1934 | error = -EFAULT; | |
1935 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | |
1936 | goto out; | |
1937 | } | |
1938 | error = 0; | |
1939 | out: | |
1940 | return error; | |
1941 | } | |
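A minimal userspace sketch of the behavior implemented above (glibc's sigprocmask() typically ends up in sys_rt_sigprocmask()): SIGKILL is silently dropped from the new mask by the sigdelsetmask() call, so it can never be blocked.

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigaddset(&set, SIGKILL);           /* silently discarded */
        sigprocmask(SIG_BLOCK, &set, NULL);

        sigprocmask(SIG_BLOCK, NULL, &set); /* read back the current mask */
        printf("SIGINT blocked: %d, SIGKILL blocked: %d\n",
               sigismember(&set, SIGINT), sigismember(&set, SIGKILL));
        return 0;                           /* prints 1 and 0 */
}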
1942 | ||
1943 | long do_sigpending(void __user *set, unsigned long sigsetsize) | |
1944 | { | |
1945 | long error = -EINVAL; | |
1946 | sigset_t pending; | |
1947 | ||
1948 | if (sigsetsize > sizeof(sigset_t)) | |
1949 | goto out; | |
1950 | ||
1951 | spin_lock_irq(&current->sighand->siglock); | |
1952 | sigorsets(&pending, &current->pending.signal, | |
1953 | &current->signal->shared_pending.signal); | |
1954 | spin_unlock_irq(&current->sighand->siglock); | |
1955 | ||
1956 | /* Outside the lock because only this thread touches it. */ | |
1957 | sigandsets(&pending, &current->blocked, &pending); | |
1958 | ||
1959 | error = -EFAULT; | |
1960 | if (!copy_to_user(set, &pending, sigsetsize)) | |
1961 | error = 0; | |
1962 | ||
1963 | out: | |
1964 | return error; | |
1965 | } | |
1966 | ||
1967 | asmlinkage long | |
1968 | sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) | |
1969 | { | |
1970 | return do_sigpending(set, sigsetsize); | |
1971 | } | |
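A small userspace demonstration of do_sigpending(): a signal raised while blocked shows up in the pending set.

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pend;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);
        raise(SIGUSR1);                     /* stays pending while blocked */

        sigpending(&pend);
        printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1)); /* 1 */
        return 0;
}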
1972 | ||
1973 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER | |
1974 | ||
1975 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |
1976 | { | |
1977 | int err; | |
1978 | ||
1979 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) | |
1980 | return -EFAULT; | |
1981 | if (from->si_code < 0) | |
1982 | return __copy_to_user(to, from, sizeof(siginfo_t)) | |
1983 | ? -EFAULT : 0; | |
1984 | /* | |
1985 | * If you change siginfo_t structure, please be sure | |
1986 | * this code is fixed accordingly. | |
1987 | * It should never copy any pad contained in the structure | |
1988 | * to avoid security leaks, but must copy the generic | |
1989 | * 3 ints plus the relevant union member. | |
1990 | */ | |
1991 | err = __put_user(from->si_signo, &to->si_signo); | |
1992 | err |= __put_user(from->si_errno, &to->si_errno); | |
1993 | err |= __put_user((short)from->si_code, &to->si_code); | |
1994 | switch (from->si_code & __SI_MASK) { | |
1995 | case __SI_KILL: | |
1996 | err |= __put_user(from->si_pid, &to->si_pid); | |
1997 | err |= __put_user(from->si_uid, &to->si_uid); | |
1998 | break; | |
1999 | case __SI_TIMER: | |
2000 | err |= __put_user(from->si_tid, &to->si_tid); | |
2001 | err |= __put_user(from->si_overrun, &to->si_overrun); | |
2002 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2003 | break; | |
2004 | case __SI_POLL: | |
2005 | err |= __put_user(from->si_band, &to->si_band); | |
2006 | err |= __put_user(from->si_fd, &to->si_fd); | |
2007 | break; | |
2008 | case __SI_FAULT: | |
2009 | err |= __put_user(from->si_addr, &to->si_addr); | |
2010 | #ifdef __ARCH_SI_TRAPNO | |
2011 | err |= __put_user(from->si_trapno, &to->si_trapno); | |
2012 | #endif | |
2013 | break; | |
2014 | case __SI_CHLD: | |
2015 | err |= __put_user(from->si_pid, &to->si_pid); | |
2016 | err |= __put_user(from->si_uid, &to->si_uid); | |
2017 | err |= __put_user(from->si_status, &to->si_status); | |
2018 | err |= __put_user(from->si_utime, &to->si_utime); | |
2019 | err |= __put_user(from->si_stime, &to->si_stime); | |
2020 | break; | |
2021 | case __SI_RT: /* This is not generated by the kernel as of now. */ | |
2022 | case __SI_MESGQ: /* But this one is. */ | |
2023 | err |= __put_user(from->si_pid, &to->si_pid); | |
2024 | err |= __put_user(from->si_uid, &to->si_uid); | |
2025 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2026 | break; | |
2027 | default: /* this is just in case for now ... */ | |
2028 | err |= __put_user(from->si_pid, &to->si_pid); | |
2029 | err |= __put_user(from->si_uid, &to->si_uid); | |
2030 | break; | |
2031 | } | |
2032 | return err; | |
2033 | } | |
2034 | ||
2035 | #endif | |
2036 | ||
2037 | asmlinkage long | |
2038 | sys_rt_sigtimedwait(const sigset_t __user *uthese, | |
2039 | siginfo_t __user *uinfo, | |
2040 | const struct timespec __user *uts, | |
2041 | size_t sigsetsize) | |
2042 | { | |
2043 | int ret, sig; | |
2044 | sigset_t these; | |
2045 | struct timespec ts; | |
2046 | siginfo_t info; | |
2047 | long timeout = 0; | |
2048 | ||
2049 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2050 | if (sigsetsize != sizeof(sigset_t)) | |
2051 | return -EINVAL; | |
2052 | ||
2053 | if (copy_from_user(&these, uthese, sizeof(these))) | |
2054 | return -EFAULT; | |
2055 | ||
2056 | /* | |
2057 | * Invert the set of allowed signals to get those we | |
2058 | * want to block. | |
2059 | */ | |
2060 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2061 | signotset(&these); | |
2062 | ||
2063 | if (uts) { | |
2064 | if (copy_from_user(&ts, uts, sizeof(ts))) | |
2065 | return -EFAULT; | |
2066 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 | |
2067 | || ts.tv_sec < 0) | |
2068 | return -EINVAL; | |
2069 | } | |
2070 | ||
2071 | spin_lock_irq(&current->sighand->siglock); | |
2072 | sig = dequeue_signal(current, &these, &info); | |
2073 | if (!sig) { | |
2074 | timeout = MAX_SCHEDULE_TIMEOUT; | |
2075 | if (uts) | |
2076 | timeout = (timespec_to_jiffies(&ts) | |
2077 | + (ts.tv_sec || ts.tv_nsec)); | |
2078 | ||
2079 | if (timeout) { | |
2080 | /* None ready -- temporarily unblock those we're | |
2081 | * interested in while we are sleeping, so that we'll | |
2082 | * be awakened when they arrive. */ | |
2083 | current->real_blocked = current->blocked; | |
2084 | sigandsets(&current->blocked, &current->blocked, &these); | |
2085 | recalc_sigpending(); | |
2086 | spin_unlock_irq(&current->sighand->siglock); | |
2087 | ||
75bcc8c5 | 2088 | timeout = schedule_timeout_interruptible(timeout); |
1da177e4 | 2089 | |
1da177e4 LT |
2090 | spin_lock_irq(&current->sighand->siglock); | |
2091 | sig = dequeue_signal(current, &these, &info); | |
2092 | current->blocked = current->real_blocked; | |
2093 | siginitset(¤t->real_blocked, 0); | |
2094 | recalc_sigpending(); | |
2095 | } | |
2096 | } | |
2097 | spin_unlock_irq(&current->sighand->siglock); | |
2098 | ||
2099 | if (sig) { | |
2100 | ret = sig; | |
2101 | if (uinfo) { | |
2102 | if (copy_siginfo_to_user(uinfo, &info)) | |
2103 | ret = -EFAULT; | |
2104 | } | |
2105 | } else { | |
2106 | ret = -EAGAIN; | |
2107 | if (timeout) | |
2108 | ret = -EINTR; | |
2109 | } | |
2110 | ||
2111 | return ret; | |
2112 | } | |
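A minimal userspace sketch of the syscall above via sigtimedwait(2): the waited-for set must normally be blocked first, which is exactly the inversion dance the kernel code performs with real_blocked.

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { 0, 100 * 1000 * 1000 }; /* 100 ms */
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL); /* block before waiting */
        raise(SIGUSR1);

        sig = sigtimedwait(&set, &info, &ts);
        if (sig > 0)
                printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
        return 0;                           /* -1/EAGAIN on timeout */
}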
2113 | ||
2114 | asmlinkage long | |
2115 | sys_kill(int pid, int sig) | |
2116 | { | |
2117 | struct siginfo info; | |
2118 | ||
2119 | info.si_signo = sig; | |
2120 | info.si_errno = 0; | |
2121 | info.si_code = SI_USER; | |
2122 | info.si_pid = current->tgid; | |
2123 | info.si_uid = current->uid; | |
2124 | ||
2125 | return kill_something_info(sig, &info, pid); | |
2126 | } | |
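A userspace sketch of the pid encoding that kill_something_info() interprets; the null signal keeps both calls harmless permission/existence probes.

#include <signal.h>
#include <unistd.h>

int main(void)
{
        kill(getpid(), 0);   /* pid > 0: probe a single process       */
        kill(-getpgrp(), 0); /* pid < -1: probe the process group -pid */
        return 0;
}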
2127 | ||
6dd69f10 | 2128 | static int do_tkill(int tgid, int pid, int sig) |
1da177e4 | 2129 | { |
1da177e4 | 2130 | int error; |
6dd69f10 | 2131 | struct siginfo info; |
1da177e4 LT |
2132 | struct task_struct *p; |
2133 | ||
6dd69f10 | 2134 | error = -ESRCH; |
1da177e4 LT |
2135 | info.si_signo = sig; |
2136 | info.si_errno = 0; | |
2137 | info.si_code = SI_TKILL; | |
2138 | info.si_pid = current->tgid; | |
2139 | info.si_uid = current->uid; | |
2140 | ||
2141 | read_lock(&tasklist_lock); | |
2142 | p = find_task_by_pid(pid); | |
6dd69f10 | 2143 | if (p && (tgid <= 0 || p->tgid == tgid)) { |
1da177e4 LT |
2144 | error = check_kill_permission(sig, &info, p); |
2145 | /* | |
2146 | * The null signal is a permissions and process existence | |
2147 | * probe. No signal is actually delivered. | |
2148 | */ | |
2149 | if (!error && sig && p->sighand) { | |
2150 | spin_lock_irq(&p->sighand->siglock); | |
2151 | handle_stop_signal(sig, p); | |
2152 | error = specific_send_sig_info(sig, &info, p); | |
2153 | spin_unlock_irq(&p->sighand->siglock); | |
2154 | } | |
2155 | } | |
2156 | read_unlock(&tasklist_lock); | |
6dd69f10 | 2157 | |
1da177e4 LT |
2158 | return error; |
2159 | } | |
2160 | ||
6dd69f10 VL |
2161 | /** |
2162 | * sys_tgkill - send signal to one specific thread | |
2163 | * @tgid: the thread group ID of the thread | |
2164 | * @pid: the PID of the thread | |
2165 | * @sig: signal to be sent | |
2166 | * | |
72fd4a35 | 2167 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
6dd69f10 VL |
2168 | * exists but no longer belongs to the target process. This | |
2169 | * method solves the problem of threads exiting and PIDs getting reused. | |
2170 | */ | |
2171 | asmlinkage long sys_tgkill(int tgid, int pid, int sig) | |
2172 | { | |
2173 | /* This is only valid for single tasks */ | |
2174 | if (pid <= 0 || tgid <= 0) | |
2175 | return -EINVAL; | |
2176 | ||
2177 | return do_tkill(tgid, pid, sig); | |
2178 | } | |
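A userspace sketch, assuming no libc wrapper for tgkill (raw syscall(2) is used); the null signal makes it a pure existence/permission probe:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long tid = syscall(SYS_gettid);

        /* A tgid mismatch would return -ESRCH even if the tid exists. */
        return syscall(SYS_tgkill, (long)getpid(), tid, 0) ? 1 : 0;
}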
2179 | ||
1da177e4 LT |
2180 | /* |
2181 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | |
2182 | */ | |
2183 | asmlinkage long | |
2184 | sys_tkill(int pid, int sig) | |
2185 | { | |
1da177e4 LT |
2186 | /* This is only valid for single tasks */ |
2187 | if (pid <= 0) | |
2188 | return -EINVAL; | |
2189 | ||
6dd69f10 | 2190 | return do_tkill(0, pid, sig); |
1da177e4 LT |
2191 | } |
2192 | ||
2193 | asmlinkage long | |
2194 | sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) | |
2195 | { | |
2196 | siginfo_t info; | |
2197 | ||
2198 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | |
2199 | return -EFAULT; | |
2200 | ||
2201 | /* Not even root can pretend to send signals from the kernel. | |
2202 | Nor can they impersonate a kill(), which adds source info. */ | |
2203 | if (info.si_code >= 0) | |
2204 | return -EPERM; | |
2205 | info.si_signo = sig; | |
2206 | ||
2207 | /* POSIX.1b doesn't mention process groups. */ | |
2208 | return kill_proc_info(sig, &info, pid); | |
2209 | } | |
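Userspace normally reaches this entry point through sigqueue(3), which fills in si_code = SI_QUEUE (a negative value), so the si_code >= 0 check above never fires for legitimate callers. A minimal sketch:

#include <signal.h>
#include <unistd.h>

int main(void)
{
        union sigval v = { .sival_int = 42 };

        /* Null signal: permission and existence check only. */
        return sigqueue(getpid(), 0, v);
}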
2210 | ||
88531f72 | 2211 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4 LT |
2212 | { |
2213 | struct k_sigaction *k; | |
71fabd5e | 2214 | sigset_t mask; |
1da177e4 | 2215 | |
7ed20e1a | 2216 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4 LT |
2217 | return -EINVAL; |
2218 | ||
2219 | k = &current->sighand->action[sig-1]; | |
2220 | ||
2221 | spin_lock_irq(&current->sighand->siglock); | |
2222 | if (signal_pending(current)) { | |
2223 | /* | |
2224 | * If there might be a fatal signal pending on multiple | |
2225 | * threads, make sure we take it before changing the action. | |
2226 | */ | |
2227 | spin_unlock_irq(&current->sighand->siglock); | |
2228 | return -ERESTARTNOINTR; | |
2229 | } | |
2230 | ||
2231 | if (oact) | |
2232 | *oact = *k; | |
2233 | ||
2234 | if (act) { | |
9ac95f2f ON |
2235 | sigdelsetmask(&act->sa.sa_mask, |
2236 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
88531f72 | 2237 | *k = *act; |
1da177e4 LT |
2238 | /* |
2239 | * POSIX 3.3.1.3: | |
2240 | * "Setting a signal action to SIG_IGN for a signal that is | |
2241 | * pending shall cause the pending signal to be discarded, | |
2242 | * whether or not it is blocked." | |
2243 | * | |
2244 | * "Setting a signal action to SIG_DFL for a signal that is | |
2245 | * pending and whose default action is to ignore the signal | |
2246 | * (for example, SIGCHLD), shall cause the pending signal to | |
2247 | * be discarded, whether or not it is blocked" | |
2248 | */ | |
2249 | if (act->sa.sa_handler == SIG_IGN || | |
88531f72 | 2250 | (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { |
1da177e4 | 2251 | struct task_struct *t = current; |
71fabd5e GA |
2252 | sigemptyset(&mask); |
2253 | sigaddset(&mask, sig); | |
2254 | rm_from_queue_full(&mask, &t->signal->shared_pending); | |
1da177e4 | 2255 | do { |
71fabd5e | 2256 | rm_from_queue_full(&mask, &t->pending); |
1da177e4 LT |
2257 | recalc_sigpending_tsk(t); |
2258 | t = next_thread(t); | |
2259 | } while (t != current); | |
1da177e4 | 2260 | } |
1da177e4 LT |
2261 | } |
2262 | ||
2263 | spin_unlock_irq(&current->sighand->siglock); | |
2264 | return 0; | |
2265 | } | |
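The POSIX 3.3.1.3 behavior implemented above is observable from userspace: a blocked, pending signal vanishes when its action becomes SIG_IGN.

#include <signal.h>
#include <stdio.h>

int main(void)
{
        struct sigaction sa = { .sa_handler = SIG_IGN };
        sigset_t set, pend;

        sigemptyset(&sa.sa_mask);
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);
        raise(SIGUSR1);                /* pending and blocked */

        sigaction(SIGUSR1, &sa, NULL); /* discards the pending signal */
        sigpending(&pend);
        printf("still pending: %d\n", sigismember(&pend, SIGUSR1)); /* 0 */
        return 0;
}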
2266 | ||
2267 | int | |
2268 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) | |
2269 | { | |
2270 | stack_t oss; | |
2271 | int error; | |
2272 | ||
2273 | if (uoss) { | |
2274 | oss.ss_sp = (void __user *) current->sas_ss_sp; | |
2275 | oss.ss_size = current->sas_ss_size; | |
2276 | oss.ss_flags = sas_ss_flags(sp); | |
2277 | } | |
2278 | ||
2279 | if (uss) { | |
2280 | void __user *ss_sp; | |
2281 | size_t ss_size; | |
2282 | int ss_flags; | |
2283 | ||
2284 | error = -EFAULT; | |
2285 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) | |
2286 | || __get_user(ss_sp, &uss->ss_sp) | |
2287 | || __get_user(ss_flags, &uss->ss_flags) | |
2288 | || __get_user(ss_size, &uss->ss_size)) | |
2289 | goto out; | |
2290 | ||
2291 | error = -EPERM; | |
2292 | if (on_sig_stack(sp)) | |
2293 | goto out; | |
2294 | ||
2295 | error = -EINVAL; | |
2296 | /* | |
2297 | * | |
2298 | * Note - this code used to test ss_flags incorrectly: | |
2299 | * old code may have been written using ss_flags==0 | |
2300 | * to mean ss_flags==SS_ONSTACK (as this was the only | |
2301 | * way that worked), so this fix preserves that older | |
2302 | * mechanism. | |
2303 | */ | |
2304 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | |
2305 | goto out; | |
2306 | ||
2307 | if (ss_flags == SS_DISABLE) { | |
2308 | ss_size = 0; | |
2309 | ss_sp = NULL; | |
2310 | } else { | |
2311 | error = -ENOMEM; | |
2312 | if (ss_size < MINSIGSTKSZ) | |
2313 | goto out; | |
2314 | } | |
2315 | ||
2316 | current->sas_ss_sp = (unsigned long) ss_sp; | |
2317 | current->sas_ss_size = ss_size; | |
2318 | } | |
2319 | ||
2320 | if (uoss) { | |
2321 | error = -EFAULT; | |
2322 | if (copy_to_user(uoss, &oss, sizeof(oss))) | |
2323 | goto out; | |
2324 | } | |
2325 | ||
2326 | error = 0; | |
2327 | out: | |
2328 | return error; | |
2329 | } | |
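A minimal userspace counterpart: install an alternate stack, then request it with SA_ONSTACK. Calling sigaltstack() while already running on the alternate stack would hit the -EPERM branch above.

#include <signal.h>
#include <stdlib.h>

static void on_usr1(int sig)
{
        /* Runs on the alternate stack because of SA_ONSTACK. */
        (void)sig;
}

int main(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
                       .ss_size = SIGSTKSZ,
                       .ss_flags = 0 };
        struct sigaction sa = { .sa_handler = on_usr1,
                                .sa_flags = SA_ONSTACK };

        if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
                return 1;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
}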
2330 | ||
2331 | #ifdef __ARCH_WANT_SYS_SIGPENDING | |
2332 | ||
2333 | asmlinkage long | |
2334 | sys_sigpending(old_sigset_t __user *set) | |
2335 | { | |
2336 | return do_sigpending(set, sizeof(*set)); | |
2337 | } | |
2338 | ||
2339 | #endif | |
2340 | ||
2341 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | |
2342 | /* Some platforms have their own version with special arguments; | |
2343 | others support only sys_rt_sigprocmask. */ | |
2344 | ||
2345 | asmlinkage long | |
2346 | sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) | |
2347 | { | |
2348 | int error; | |
2349 | old_sigset_t old_set, new_set; | |
2350 | ||
2351 | if (set) { | |
2352 | error = -EFAULT; | |
2353 | if (copy_from_user(&new_set, set, sizeof(*set))) | |
2354 | goto out; | |
2355 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
2356 | ||
2357 | spin_lock_irq(&current->sighand->siglock); | |
2358 | old_set = current->blocked.sig[0]; | |
2359 | ||
2360 | error = 0; | |
2361 | switch (how) { | |
2362 | default: | |
2363 | error = -EINVAL; | |
2364 | break; | |
2365 | case SIG_BLOCK: | |
2366 | sigaddsetmask(&current->blocked, new_set); | |
2367 | break; | |
2368 | case SIG_UNBLOCK: | |
2369 | sigdelsetmask(&current->blocked, new_set); | |
2370 | break; | |
2371 | case SIG_SETMASK: | |
2372 | current->blocked.sig[0] = new_set; | |
2373 | break; | |
2374 | } | |
2375 | ||
2376 | recalc_sigpending(); | |
2377 | spin_unlock_irq(&current->sighand->siglock); | |
2378 | if (error) | |
2379 | goto out; | |
2380 | if (oset) | |
2381 | goto set_old; | |
2382 | } else if (oset) { | |
2383 | old_set = current->blocked.sig[0]; | |
2384 | set_old: | |
2385 | error = -EFAULT; | |
2386 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | |
2387 | goto out; | |
2388 | } | |
2389 | error = 0; | |
2390 | out: | |
2391 | return error; | |
2392 | } | |
2393 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | |
2394 | ||
2395 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | |
2396 | asmlinkage long | |
2397 | sys_rt_sigaction(int sig, | |
2398 | const struct sigaction __user *act, | |
2399 | struct sigaction __user *oact, | |
2400 | size_t sigsetsize) | |
2401 | { | |
2402 | struct k_sigaction new_sa, old_sa; | |
2403 | int ret = -EINVAL; | |
2404 | ||
2405 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2406 | if (sigsetsize != sizeof(sigset_t)) | |
2407 | goto out; | |
2408 | ||
2409 | if (act) { | |
2410 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) | |
2411 | return -EFAULT; | |
2412 | } | |
2413 | ||
2414 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | |
2415 | ||
2416 | if (!ret && oact) { | |
2417 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) | |
2418 | return -EFAULT; | |
2419 | } | |
2420 | out: | |
2421 | return ret; | |
2422 | } | |
2423 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ | |
2424 | ||
2425 | #ifdef __ARCH_WANT_SYS_SGETMASK | |
2426 | ||
2427 | /* | |
2428 | * For backwards compatibility. Functionality superseded by sigprocmask. | |
2429 | */ | |
2430 | asmlinkage long | |
2431 | sys_sgetmask(void) | |
2432 | { | |
2433 | /* SMP safe */ | |
2434 | return current->blocked.sig[0]; | |
2435 | } | |
2436 | ||
2437 | asmlinkage long | |
2438 | sys_ssetmask(int newmask) | |
2439 | { | |
2440 | int old; | |
2441 | ||
2442 | spin_lock_irq(&current->sighand->siglock); | |
2443 | old = current->blocked.sig[0]; | |
2444 | ||
2445 | siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)| | |
2446 | sigmask(SIGSTOP))); | |
2447 | recalc_sigpending(); | |
2448 | spin_unlock_irq(&current->sighand->siglock); | |
2449 | ||
2450 | return old; | |
2451 | } | |
2452 | #endif /* __ARCH_WANT_SYS_SGETMASK */ | |
2453 | ||
2454 | #ifdef __ARCH_WANT_SYS_SIGNAL | |
2455 | /* | |
2456 | * For backwards compatibility. Functionality superseded by sigaction. | |
2457 | */ | |
2458 | asmlinkage unsigned long | |
2459 | sys_signal(int sig, __sighandler_t handler) | |
2460 | { | |
2461 | struct k_sigaction new_sa, old_sa; | |
2462 | int ret; | |
2463 | ||
2464 | new_sa.sa.sa_handler = handler; | |
2465 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | |
c70d3d70 | 2466 | sigemptyset(&new_sa.sa.sa_mask); |
1da177e4 LT |
2467 | |
2468 | ret = do_sigaction(sig, &new_sa, &old_sa); | |
2469 | ||
2470 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | |
2471 | } | |
2472 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | |
2473 | ||
2474 | #ifdef __ARCH_WANT_SYS_PAUSE | |
2475 | ||
2476 | asmlinkage long | |
2477 | sys_pause(void) | |
2478 | { | |
2479 | current->state = TASK_INTERRUPTIBLE; | |
2480 | schedule(); | |
2481 | return -ERESTARTNOHAND; | |
2482 | } | |
2483 | ||
2484 | #endif | |
2485 | ||
150256d8 DW |
2486 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
2487 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) | |
2488 | { | |
2489 | sigset_t newset; | |
2490 | ||
2491 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2492 | if (sigsetsize != sizeof(sigset_t)) | |
2493 | return -EINVAL; | |
2494 | ||
2495 | if (copy_from_user(&newset, unewset, sizeof(newset))) | |
2496 | return -EFAULT; | |
2497 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2498 | ||
2499 | spin_lock_irq(&current->sighand->siglock); | |
2500 | current->saved_sigmask = current->blocked; | |
2501 | current->blocked = newset; | |
2502 | recalc_sigpending(); | |
2503 | spin_unlock_irq(&current->sighand->siglock); | |
2504 | ||
2505 | current->state = TASK_INTERRUPTIBLE; | |
2506 | schedule(); | |
2507 | set_thread_flag(TIF_RESTORE_SIGMASK); | |
2508 | return -ERESTARTNOHAND; | |
2509 | } | |
2510 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ | |
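The saved_sigmask / TIF_RESTORE_SIGMASK dance above is what makes the classic race-free wait pattern work from userspace: the mask swap and the sleep are atomic with respect to signal delivery.

#include <signal.h>

static volatile sig_atomic_t got;

static void on_usr1(int sig) { (void)sig; got = 1; }

int main(void)
{
        sigset_t block, waitmask;
        struct sigaction sa = { .sa_handler = on_usr1 };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &waitmask); /* close the race window */

        raise(SIGUSR1);            /* arrives while blocked: stays pending */
        while (!got)
                sigsuspend(&waitmask); /* atomically unblock and sleep */
        return 0;
}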
2511 | ||
f269fdd1 DH |
2512 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
2513 | { | |
2514 | return NULL; | |
2515 | } | |
2516 | ||
1da177e4 LT |
2517 | void __init signals_init(void) |
2518 | { | |
0a31bd5f | 2519 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
1da177e4 | 2520 | } |