/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int __sig_ignored(struct task_struct *t, int sig)
{
        void __user *handler;

        /* Is it explicitly or implicitly ignored? */

        handler = t->sighand->action[sig - 1].sa.sa_handler;
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
        /*
         * Tracers always want to know about signals..
         */
        if (t->ptrace & PT_PTRACED)
                return 0;

        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        return __sig_ignored(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}
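
/*
 * Illustrative note (an addition, not from the original file): with
 * _NSIG == 64 and 64-bit longs, _NSIG_WORDS is 1 and the whole test
 * above collapses to a single word operation:
 *
 *      ready = signal->sig[0] &~ blocked->sig[0];   // pending & ~blocked
 *
 * The switch merely unrolls the same computation for configurations
 * where the sigset spans 2 or 4 words.
 */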

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers that know it is safe
         * should clear it.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}
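
/*
 * Illustrative sketch (an addition, not from the original file): how
 * the word scan above resolves a signal number.  Suppose _NSIG_WORDS
 * is 1, SIGINT (2) and SIGTERM (15) are pending, and SIGINT is blocked:
 *
 *      pending->signal.sig[0] = (1UL << (2-1)) | (1UL << (15-1));
 *      mask->sig[0]           = (1UL << (2-1));
 *
 *      x = *s &~ *m;           // only bit 14 (SIGTERM) survives
 *      sig = ffz(~x) + 1;      // ffz(~x) == 14, so sig == 15
 *
 * ffz() returns the index of the first zero bit, so ffz(~x) is the
 * index of the first set bit in x; the +1 converts the 0-based bit
 * index back to a 1-based signal number.
 */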
171
dd0fc66f 172static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
1da177e4
LT
173 int override_rlimit)
174{
175 struct sigqueue *q = NULL;
10b1fbdb 176 struct user_struct *user;
1da177e4 177
10b1fbdb
LT
178 /*
179 * In order to avoid problems with "switch_user()", we want to make
180 * sure that the compiler doesn't re-load "t->user"
181 */
182 user = t->user;
183 barrier();
184 atomic_inc(&user->sigpending);
1da177e4 185 if (override_rlimit ||
10b1fbdb 186 atomic_read(&user->sigpending) <=
1da177e4
LT
187 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
188 q = kmem_cache_alloc(sigqueue_cachep, flags);
189 if (unlikely(q == NULL)) {
10b1fbdb 190 atomic_dec(&user->sigpending);
1da177e4
LT
191 } else {
192 INIT_LIST_HEAD(&q->list);
193 q->flags = 0;
10b1fbdb 194 q->user = get_uid(user);
1da177e4
LT
195 }
196 return(q);
197}

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
        if (is_global_init(tsk))
                return 1;
        if (tsk->ptrace & PT_PTRACED)
                return 0;
        return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
                (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
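
/*
 * Illustrative sketch (an addition, not from the original file): how a
 * driver might use the notifier pair above; the callback and device
 * structure here are hypothetical.
 *
 *      static int my_notifier(void *priv)
 *      {
 *              struct my_dev *dev = priv;
 *              return dev->allow_signals;      // non-zero: deliver anyway
 *      }
 *
 *      sigset_t mask;
 *      sigfillset(&mask);
 *      block_all_signals(my_notifier, dev, &mask);
 *      ... critical section that must not be disturbed by signals ...
 *      unblock_all_signals();
 */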

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
        } else {
                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                collect_signal(sig, pending, info);
        }

        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavy loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
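
/*
 * Illustrative sketch (an addition, not from the original file): the
 * calling convention for dequeue_signal().  All callers hold ->siglock,
 * as get_signal_to_deliver() below does:
 *
 *      siginfo_t info;
 *      int signr;
 *
 *      spin_lock_irq(&current->sighand->siglock);
 *      signr = dequeue_signal(current, &current->blocked, &info);
 *      spin_unlock_irq(&current->sighand->siglock);
 *      if (signr)
 *              ... deliver signr using info ...
 */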

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
            (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
            !capable(CAP_KILL)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}
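
/*
 * Illustrative note (an addition, not from the original file): the XOR
 * chain above is just an inequality test -- (a ^ b) is non-zero exactly
 * when a != b.  So the sender needs CAP_KILL unless one of these holds:
 *
 *      current->euid == t->suid || current->euid == t->uid ||
 *      current->uid  == t->suid || current->uid  == t->uid
 *
 * which is the classic POSIX kill() permission rule, with the SIGCONT
 * same-session exception handled in the switch.
 */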

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
                } while_each_thread(p, t);

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from finish_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }

        return !sig_ignored(p, sig);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
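
/*
 * Illustrative note (an addition, not from the original file):
 * legacy_queue() is what makes non-realtime signals collapse.  If
 * SIGCHLD (< SIGRTMIN) is already pending, a second SIGCHLD is dropped
 * by send_signal() below; a realtime signal such as SIGRTMIN+1 is
 * queued again each time:
 *
 *      legacy_queue(pending, SIGCHLD)          // 1 if already pending
 *      legacy_queue(pending, SIGRTMIN + 1)     // always 0
 */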

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                       int group)
{
        struct sigpending *pending;
        struct sigqueue *q;

        assert_spin_locked(&t->sighand->siglock);
        if (!prepare_signal(sig, t))
                return 0;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        if (legacy_queue(pending, sig))
                return 0;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_pid_vnr(current);
                        q->info.si_uid = current->uid;
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER)
                        /*
                         * Queue overflow, abort.  We may abort if the signal was rt
                         * and sent by user using something other than kill().
                         */
                        return -EAGAIN;
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
                current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
        printk("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        __get_user(insn, (unsigned char *)(regs->ip + i));
                        printk("%02x ", insn);
                }
        }
#endif
        printk("\n");
        show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option (&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
        force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
        struct task_struct *t;

        p->signal->group_stop_count = 0;

        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
                 */
                if (t->exit_state)
                        continue;

                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
}

int __fatal_signal_pending(struct task_struct *tsk)
{
        return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}
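
/*
 * Illustrative sketch (an addition, not from the original file): the
 * canonical pairing with unlock_task_sighand().  The retry loop above
 * closes the race where ->sighand is switched between the RCU lookup
 * and taking the lock:
 *
 *      unsigned long flags;
 *
 *      if (lock_task_sighand(p, &flags)) {
 *              ... p->sighand is stable here ...
 *              unlock_task_sighand(p, &flags);
 *      } else {
 *              ... the task is already dead and reaped ...
 *      }
 */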

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        unsigned long flags;
        int ret;

        ret = check_kill_permission(sig, info, p);

        if (!ret && sig) {
                ret = -ESRCH;
                if (lock_task_sighand(p, &flags)) {
                        ret = __group_send_sig_info(sig, info, p);
                        unlock_task_sighand(p, &flags);
                }
        }

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        rcu_read_lock();
retry:
        p = pid_task(pid, PIDTYPE_PID);
        if (p) {
                error = group_send_sig_info(sig, info, p);
                if (unlikely(error == -ESRCH))
                        /*
                         * The task was unhashed in between, try again.
                         * If it is dead, pid_task() will return NULL,
                         * if we race with de_thread() it will find the
                         * new leader.
                         */
                        goto retry;
        }
        rcu_read_unlock();

        return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                         uid_t uid, uid_t euid, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;

        if (!valid_signal(sig))
                return ret;

        read_lock(&tasklist_lock);
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && (euid != p->suid) && (euid != p->uid)
            && (uid != p->suid) && (uid != p->uid)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
        if (sig && p->sighand) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                ret = __group_send_sig_info(sig, info, p);
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
out_unlock:
        read_unlock(&tasklist_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct * p;

                for_each_process(p) {
                        if (p->pid > 1 && !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        unsigned long flags;

        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
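
/*
 * Illustrative sketch (an addition, not from the original file): a
 * typical in-kernel caller delivering SIGTERM to a process it holds a
 * struct pid reference on:
 *
 *      struct pid *pid = find_get_pid(nr);     // takes a reference
 *
 *      if (pid) {
 *              kill_pid(pid, SIGTERM, 1);      // priv=1: SEND_SIG_PRIV
 *              put_pid(pid);
 *      }
 */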

int
kill_proc(pid_t pid, int sig, int priv)
{
        int ret;

        rcu_read_lock();
        ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
        rcu_read_unlock();
        return ret;
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q;

        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return(q);
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        if (!prepare_signal(sig, t))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
out:
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}
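
/*
 * Illustrative sketch (an addition, not from the original file): how
 * the posix-timers code pairs these helpers.  The sigqueue is allocated
 * once at timer creation time, so timer expiry can never fail with
 * -EAGAIN:
 *
 *      // at timer creation
 *      struct sigqueue *q = sigqueue_alloc();
 *      if (!q)
 *              return -EAGAIN;
 *      q->info.si_code = SI_TIMER;
 *
 *      // at every expiry: re-send the same preallocated entry;
 *      // a return of 1 means the signal is ignored, 0 means queued
 *      send_sigqueue(q, task, group);
 *
 *      // at timer deletion
 *      sigqueue_free(q);
 */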

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
{
        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot exit and release its namespace.
         *
         * The only thing it can do is switch its nsproxy with sys_unshare,
         * but unsharing pid namespaces is not allowed, so we'll always
         * see the relevant namespace.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg it is not
         * correct to rely on this.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        rcu_read_unlock();

        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
                                                       tsk->signal->utime));
        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
                                                       tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (tsk->ptrace & PT_PTRACED)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        /*
         * see comment in do_notify_parent() about the following 3 lines
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        rcu_read_unlock();

        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(tsk->utime);
        info.si_stime = cputime_to_jiffies(tsk->stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
        if (!likely(current->ptrace & PT_PTRACED))
                return 0;
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump stopping
         * is a deadlock situation, and pointless because our tracer
         * is dead so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_waiters != 0. Otherwise it
         * is safe to enter schedule().
         */
        if (unlikely(current->mm->core_waiters) &&
            unlikely(current->mm == current->parent->mm))
                return 0;

        return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
        return ((sigismember(&tsk->pending.signal, SIGKILL) ||
                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
                !unlikely(sigismember(&tsk->blocked, SIGKILL)));
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
        int killed = 0;

        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
                 * ptrace stop.  This is allowed to block, e.g. for faults
                 * on user stack pages.  We can't keep the siglock while
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
                 * Meanwhile, a SIGKILL could come in before we retake the
                 * siglock.  That must prevent us from sleeping in TASK_TRACED.
                 * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
                killed = sigkill_pending(current);
        }

        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
         */
        if (current->signal->group_stop_count > 0)
                --current->signal->group_stop_count;

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /* Let the debugger run.  */
        __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (!unlikely(killed) && may_ptrace_stop()) {
                do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
                 */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }

        /*
         * While in TASK_TRACED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         * This sets TIF_SIGPENDING, but never clears it.
         */
        recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
        siginfo_t info;

        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

        memset(&info, 0, sizeof info);
        info.si_signo = SIGTRAP;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = current->uid;

        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
        ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
        if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current, CLD_STOPPED);
                read_unlock(&tasklist_lock);
        }

        do {
                schedule();
        } while (try_to_freeze());
        /*
         * Now we don't run again until continued.
         */
        current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
        struct signal_struct *sig = current->signal;
        int stop_count;

        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
                stop_count = --sig->group_stop_count;
        } else {
                struct task_struct *t;

                if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return 0;
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now.
                 */
                sig->group_exit_code = signr;

                stop_count = 0;
                for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!(t->flags & PF_EXITING) &&
                            !task_is_stopped_or_traced(t)) {
                                stop_count++;
                                signal_wake_up(t, 0);
                        }
                sig->group_stop_count = stop_count;
        }

        if (stop_count == 0)
                sig->flags = SIGNAL_STOP_STOPPED;
        current->exit_code = sig->group_exit_code;
        __set_current_state(TASK_STOPPED);

        spin_unlock_irq(&current->sighand->siglock);
        finish_stop(stop_count);
        return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
{
        if (!(current->ptrace & PT_PTRACED))
                return signr;

        ptrace_signal_deliver(regs, cookie);

        /* Let the debugger run.  */
        ptrace_stop(signr, 0, info);

        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /* Update the siginfo structure if the signal has
           changed.  If the debugger wanted something
           specific in the siginfo structure then it should
           have updated *info via PTRACE_SETSIGINFO.  */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = current->parent->uid;
        }

        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}
1724
1da177e4
LT
1725int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1726 struct pt_regs *regs, void *cookie)
1727{
f6b76d4f
ON
1728 struct sighand_struct *sighand = current->sighand;
1729 struct signal_struct *signal = current->signal;
1730 int signr;
1da177e4 1731
13b1c3d4
RM
1732relock:
1733 /*
1734 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1735 * While in TASK_STOPPED, we were considered "frozen enough".
1736 * Now that we woke up, it's crucial if we're supposed to be
1737 * frozen that we freeze now before running anything substantial.
1738 */
fc558a74
RW
1739 try_to_freeze();
1740
f6b76d4f 1741 spin_lock_irq(&sighand->siglock);
021e1ae3
ON
1742 /*
1743 * Every stopped thread goes here after wakeup. Check to see if
1744 * we should notify the parent, prepare_signal(SIGCONT) encodes
1745 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1746 */
f6b76d4f
ON
1747 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1748 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
e4420551 1749 ? CLD_CONTINUED : CLD_STOPPED;
f6b76d4f
ON
1750 signal->flags &= ~SIGNAL_CLD_MASK;
1751 spin_unlock_irq(&sighand->siglock);
e4420551
ON
1752
1753 read_lock(&tasklist_lock);
1754 do_notify_parent_cldstop(current->group_leader, why);
1755 read_unlock(&tasklist_lock);
1756 goto relock;
1757 }

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, &current->blocked, info);
		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, signr);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
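
/*
 * A rough sketch of how an architecture's signal path drives the helper
 * above (simplified; handle_signal() stands in for the arch's
 * frame-setup routine, and real callers also deal with syscall
 * restarting):
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct k_sigaction ka;
 *		siginfo_t info;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0)
 *			handle_signal(signr, &info, &ka, regs);
 *	}
 */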
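
/*
 * Called on the exit path (from do_exit()) before the task tears down
 * its signal state: mark it PF_EXITING under siglock so it is no longer
 * a target for group-wide signals, and hand off any group-wide duties
 * it was already chosen for.
 */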
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now on this task is not visible to group-wide signals;
	 * see wants_signal() and do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/*
	 * It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal.  Another thread should be
	 * woken now to take the signal, since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
	    !--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}
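
/*
 * How this gets used: a syscall that must be interrupted mid-way (e.g.
 * by a signal) can stash its continuation in
 * current_thread_info()->restart_block and return -ERESTART_RESTARTBLOCK;
 * the arch signal code then rewrites the saved syscall state so that
 * sys_restart_syscall() runs next and resumes via restart->fn.
 */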

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to take the kernel lock - this is all local to this
 * particular thread.  (And that's good, because this is _heavily_
 * used by various programs.)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
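
/*
 * A minimal sketch (not taken from any real caller) of how a kernel
 * thread might use this to block everything except SIGKILL:
 *
 *	sigset_t mask;
 *
 *	siginitsetinv(&mask, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &mask, NULL);
 */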

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}
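
/*
 * Note the masking above: as POSIX requires, sigpending() reports only
 * the signals that are both pending (private or shared) and currently
 * blocked from delivery.
 */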

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change the siginfo_t structure, be sure this code is
	 * fixed accordingly, and remember to update signalfd_copyinfo()
	 * in fs/signalfd.c as well.
	 * This code must never copy any padding contained in the
	 * structure (to avoid security leaks), but must copy the
	 * generic 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* Not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is. */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* Just in case, for now. */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
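
/*
 * sys_rt_sigtimedwait: synchronously wait for one of the signals in
 * @uthese, optionally bounded by the timeout in @uts.  The trick below
 * is to invert the requested set into a mask of signals to block, so
 * that dequeue_signal() sees exactly the signals the caller asked for.
 */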

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
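		/*
		 * Round the jiffies timeout up by one for any nonzero
		 * request, so we never sleep for less time than the
		 * caller asked for.
		 */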
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock the signals
			 * we are interested in while we sleep, so that
			 * we'll be woken up when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
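
/*
 * kill_something_info() interprets @pid the way kill(2) does:
 * pid > 0 signals that one process, pid == 0 the caller's process
 * group, pid == -1 every process the caller may signal, and
 * pid < -1 the process group -pid.
 */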

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permission and process existence
		 * probe.  No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails, we pretend the task died
		 * after receiving the signal.  The window is tiny, and
		 * the signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process.  This solves
 * the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks. */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks. */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info.
	 */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0 to
		 * mean ss_flags==SS_ONSTACK (as this was the only way
		 * that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
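
/*
 * A minimal userspace sketch of what this call serves: installing an
 * alternate stack so a SIGSEGV handler can still run after the main
 * stack overflows.
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *		       .ss_size = SIGSTKSZ,
 *		       .ss_flags = 0 };
 *
 *	sigaltstack(&ss, NULL);
 */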

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
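/*
 * Atomically swap in a temporary signal mask and sleep until a signal
 * arrives.  set_restore_sigmask() tells the signal-delivery path to
 * restore the mask saved in current->saved_sigmask once the signal has
 * been handled, on the way back to userspace.
 */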
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}