[PATCH] w1: Adds a default family so that new slave families will show up in sysfs.
[deliverable/linux.git] / kernel / signal.c
CommitLineData
1da177e4
LT
1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18#include <linux/sched.h>
19#include <linux/fs.h>
20#include <linux/tty.h>
21#include <linux/binfmts.h>
22#include <linux/security.h>
23#include <linux/syscalls.h>
24#include <linux/ptrace.h>
25#include <linux/posix-timers.h>
7ed20e1a 26#include <linux/signal.h>
c2f0c7c3 27#include <linux/audit.h>
1da177e4
LT
28#include <asm/param.h>
29#include <asm/uaccess.h>
30#include <asm/unistd.h>
31#include <asm/siginfo.h>
32
33/*
34 * SLAB caches for signal bits.
35 */
36
37static kmem_cache_t *sigqueue_cachep;
38
39/*
40 * In POSIX a signal is sent either to a specific thread (Linux task)
41 * or to the process as a whole (Linux thread group). How the signal
42 * is sent determines whether it's to one thread or the whole group,
43 * which determines which signal mask(s) are involved in blocking it
44 * from being delivered until later. When the signal is delivered,
45 * either it's caught or ignored by a user handler or it has a default
46 * effect that applies to the whole thread group (POSIX process).
47 *
48 * The possible effects an unblocked signal set to SIG_DFL can have are:
49 * ignore - Nothing Happens
50 * terminate - kill the process, i.e. all threads in the group,
51 * similar to exit_group. The group leader (only) reports
52 * WIFSIGNALED status to its parent.
53 * coredump - write a core dump file describing all threads using
54 * the same mm and then kill all those threads
55 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
56 *
57 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58 * Other signals when not blocked and set to SIG_DFL behaves as follows.
59 * The job control signals also have other special effects.
60 *
61 * +--------------------+------------------+
62 * | POSIX signal | default action |
63 * +--------------------+------------------+
64 * | SIGHUP | terminate |
65 * | SIGINT | terminate |
66 * | SIGQUIT | coredump |
67 * | SIGILL | coredump |
68 * | SIGTRAP | coredump |
69 * | SIGABRT/SIGIOT | coredump |
70 * | SIGBUS | coredump |
71 * | SIGFPE | coredump |
72 * | SIGKILL | terminate(+) |
73 * | SIGUSR1 | terminate |
74 * | SIGSEGV | coredump |
75 * | SIGUSR2 | terminate |
76 * | SIGPIPE | terminate |
77 * | SIGALRM | terminate |
78 * | SIGTERM | terminate |
79 * | SIGCHLD | ignore |
80 * | SIGCONT | ignore(*) |
81 * | SIGSTOP | stop(*)(+) |
82 * | SIGTSTP | stop(*) |
83 * | SIGTTIN | stop(*) |
84 * | SIGTTOU | stop(*) |
85 * | SIGURG | ignore |
86 * | SIGXCPU | coredump |
87 * | SIGXFSZ | coredump |
88 * | SIGVTALRM | terminate |
89 * | SIGPROF | terminate |
90 * | SIGPOLL/SIGIO | terminate |
91 * | SIGSYS/SIGUNUSED | coredump |
92 * | SIGSTKFLT | terminate |
93 * | SIGWINCH | ignore |
94 * | SIGPWR | terminate |
95 * | SIGRTMIN-SIGRTMAX | terminate |
96 * +--------------------+------------------+
97 * | non-POSIX signal | default action |
98 * +--------------------+------------------+
99 * | SIGEMT | coredump |
100 * +--------------------+------------------+
101 *
102 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103 * (*) Special job control effects:
104 * When SIGCONT is sent, it resumes the process (all threads in the group)
105 * from TASK_STOPPED state and also clears any pending/queued stop signals
106 * (any of those marked with "stop(*)"). This happens regardless of blocking,
107 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
108 * any pending/queued SIGCONT signals; this happens regardless of blocking,
109 * catching, or ignored the stop signal, though (except for SIGSTOP) the
110 * default action of stopping the process may happen later or never.
111 */
112
113#ifdef SIGEMT
114#define M_SIGEMT M(SIGEMT)
115#else
116#define M_SIGEMT 0
117#endif
118
119#if SIGRTMIN > BITS_PER_LONG
120#define M(sig) (1ULL << ((sig)-1))
121#else
122#define M(sig) (1UL << ((sig)-1))
123#endif
124#define T(sig, mask) (M(sig) & (mask))
125
126#define SIG_KERNEL_ONLY_MASK (\
127 M(SIGKILL) | M(SIGSTOP) )
128
129#define SIG_KERNEL_STOP_MASK (\
130 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
131
132#define SIG_KERNEL_COREDUMP_MASK (\
133 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
134 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
135 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
136
137#define SIG_KERNEL_IGNORE_MASK (\
138 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
139
140#define sig_kernel_only(sig) \
141 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
142#define sig_kernel_coredump(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
144#define sig_kernel_ignore(sig) \
145 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
146#define sig_kernel_stop(sig) \
147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
148
149#define sig_user_defined(t, signr) \
150 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
151 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
152
153#define sig_fatal(t, signr) \
154 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
155 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
156
157static int sig_ignored(struct task_struct *t, int sig)
158{
159 void __user * handler;
160
161 /*
162 * Tracers always want to know about signals..
163 */
164 if (t->ptrace & PT_PTRACED)
165 return 0;
166
167 /*
168 * Blocked signals are never ignored, since the
169 * signal handler may change by the time it is
170 * unblocked.
171 */
172 if (sigismember(&t->blocked, sig))
173 return 0;
174
175 /* Is it explicitly or implicitly ignored? */
176 handler = t->sighand->action[sig-1].sa.sa_handler;
177 return handler == SIG_IGN ||
178 (handler == SIG_DFL && sig_kernel_ignore(sig));
179}
180
181/*
182 * Re-calculate pending state from the set of locally pending
183 * signals, globally pending signals, and blocked signals.
184 */
185static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
186{
187 unsigned long ready;
188 long i;
189
190 switch (_NSIG_WORDS) {
191 default:
192 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193 ready |= signal->sig[i] &~ blocked->sig[i];
194 break;
195
196 case 4: ready = signal->sig[3] &~ blocked->sig[3];
197 ready |= signal->sig[2] &~ blocked->sig[2];
198 ready |= signal->sig[1] &~ blocked->sig[1];
199 ready |= signal->sig[0] &~ blocked->sig[0];
200 break;
201
202 case 2: ready = signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
205
206 case 1: ready = signal->sig[0] &~ blocked->sig[0];
207 }
208 return ready != 0;
209}
210
211#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
212
213fastcall void recalc_sigpending_tsk(struct task_struct *t)
214{
215 if (t->signal->group_stop_count > 0 ||
216 PENDING(&t->pending, &t->blocked) ||
217 PENDING(&t->signal->shared_pending, &t->blocked))
218 set_tsk_thread_flag(t, TIF_SIGPENDING);
219 else
220 clear_tsk_thread_flag(t, TIF_SIGPENDING);
221}
222
223void recalc_sigpending(void)
224{
225 recalc_sigpending_tsk(current);
226}
227
228/* Given the mask, find the first available signal that should be serviced. */
229
230static int
231next_signal(struct sigpending *pending, sigset_t *mask)
232{
233 unsigned long i, *s, *m, x;
234 int sig = 0;
235
236 s = pending->signal.sig;
237 m = mask->sig;
238 switch (_NSIG_WORDS) {
239 default:
240 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
241 if ((x = *s &~ *m) != 0) {
242 sig = ffz(~x) + i*_NSIG_BPW + 1;
243 break;
244 }
245 break;
246
247 case 2: if ((x = s[0] &~ m[0]) != 0)
248 sig = 1;
249 else if ((x = s[1] &~ m[1]) != 0)
250 sig = _NSIG_BPW + 1;
251 else
252 break;
253 sig += ffz(~x);
254 break;
255
256 case 1: if ((x = *s &~ *m) != 0)
257 sig = ffz(~x) + 1;
258 break;
259 }
260
261 return sig;
262}
263
264static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
265 int override_rlimit)
266{
267 struct sigqueue *q = NULL;
268
269 atomic_inc(&t->user->sigpending);
270 if (override_rlimit ||
271 atomic_read(&t->user->sigpending) <=
272 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
273 q = kmem_cache_alloc(sigqueue_cachep, flags);
274 if (unlikely(q == NULL)) {
275 atomic_dec(&t->user->sigpending);
276 } else {
277 INIT_LIST_HEAD(&q->list);
278 q->flags = 0;
279 q->lock = NULL;
280 q->user = get_uid(t->user);
281 }
282 return(q);
283}
284
285static inline void __sigqueue_free(struct sigqueue *q)
286{
287 if (q->flags & SIGQUEUE_PREALLOC)
288 return;
289 atomic_dec(&q->user->sigpending);
290 free_uid(q->user);
291 kmem_cache_free(sigqueue_cachep, q);
292}
293
294static void flush_sigqueue(struct sigpending *queue)
295{
296 struct sigqueue *q;
297
298 sigemptyset(&queue->signal);
299 while (!list_empty(&queue->list)) {
300 q = list_entry(queue->list.next, struct sigqueue , list);
301 list_del_init(&q->list);
302 __sigqueue_free(q);
303 }
304}
305
306/*
307 * Flush all pending signals for a task.
308 */
309
310void
311flush_signals(struct task_struct *t)
312{
313 unsigned long flags;
314
315 spin_lock_irqsave(&t->sighand->siglock, flags);
316 clear_tsk_thread_flag(t,TIF_SIGPENDING);
317 flush_sigqueue(&t->pending);
318 flush_sigqueue(&t->signal->shared_pending);
319 spin_unlock_irqrestore(&t->sighand->siglock, flags);
320}
321
322/*
323 * This function expects the tasklist_lock write-locked.
324 */
325void __exit_sighand(struct task_struct *tsk)
326{
327 struct sighand_struct * sighand = tsk->sighand;
328
329 /* Ok, we're done with the signal handlers */
330 tsk->sighand = NULL;
331 if (atomic_dec_and_test(&sighand->count))
332 kmem_cache_free(sighand_cachep, sighand);
333}
334
335void exit_sighand(struct task_struct *tsk)
336{
337 write_lock_irq(&tasklist_lock);
338 __exit_sighand(tsk);
339 write_unlock_irq(&tasklist_lock);
340}
341
342/*
343 * This function expects the tasklist_lock write-locked.
344 */
345void __exit_signal(struct task_struct *tsk)
346{
347 struct signal_struct * sig = tsk->signal;
348 struct sighand_struct * sighand = tsk->sighand;
349
350 if (!sig)
351 BUG();
352 if (!atomic_read(&sig->count))
353 BUG();
354 spin_lock(&sighand->siglock);
355 posix_cpu_timers_exit(tsk);
356 if (atomic_dec_and_test(&sig->count)) {
357 posix_cpu_timers_exit_group(tsk);
358 if (tsk == sig->curr_target)
359 sig->curr_target = next_thread(tsk);
360 tsk->signal = NULL;
361 spin_unlock(&sighand->siglock);
362 flush_sigqueue(&sig->shared_pending);
363 } else {
364 /*
365 * If there is any task waiting for the group exit
366 * then notify it:
367 */
368 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
369 wake_up_process(sig->group_exit_task);
370 sig->group_exit_task = NULL;
371 }
372 if (tsk == sig->curr_target)
373 sig->curr_target = next_thread(tsk);
374 tsk->signal = NULL;
375 /*
376 * Accumulate here the counters for all threads but the
377 * group leader as they die, so they can be added into
378 * the process-wide totals when those are taken.
379 * The group leader stays around as a zombie as long
380 * as there are other threads. When it gets reaped,
381 * the exit.c code will add its counts into these totals.
382 * We won't ever get here for the group leader, since it
383 * will have been the last reference on the signal_struct.
384 */
385 sig->utime = cputime_add(sig->utime, tsk->utime);
386 sig->stime = cputime_add(sig->stime, tsk->stime);
387 sig->min_flt += tsk->min_flt;
388 sig->maj_flt += tsk->maj_flt;
389 sig->nvcsw += tsk->nvcsw;
390 sig->nivcsw += tsk->nivcsw;
391 sig->sched_time += tsk->sched_time;
392 spin_unlock(&sighand->siglock);
393 sig = NULL; /* Marker for below. */
394 }
395 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
396 flush_sigqueue(&tsk->pending);
397 if (sig) {
398 /*
399 * We are cleaning up the signal_struct here. We delayed
400 * calling exit_itimers until after flush_sigqueue, just in
401 * case our thread-local pending queue contained a queued
402 * timer signal that would have been cleared in
403 * exit_itimers. When that called sigqueue_free, it would
404 * attempt to re-take the tasklist_lock and deadlock. This
405 * can never happen if we ensure that all queues the
406 * timer's signal might be queued on have been flushed
407 * first. The shared_pending queue, and our own pending
408 * queue are the only queues the timer could be on, since
409 * there are no other threads left in the group and timer
410 * signals are constrained to threads inside the group.
411 */
412 exit_itimers(sig);
413 exit_thread_group_keys(sig);
414 kmem_cache_free(signal_cachep, sig);
415 }
416}
417
418void exit_signal(struct task_struct *tsk)
419{
420 write_lock_irq(&tasklist_lock);
421 __exit_signal(tsk);
422 write_unlock_irq(&tasklist_lock);
423}
424
425/*
426 * Flush all handlers for a task.
427 */
428
429void
430flush_signal_handlers(struct task_struct *t, int force_default)
431{
432 int i;
433 struct k_sigaction *ka = &t->sighand->action[0];
434 for (i = _NSIG ; i != 0 ; i--) {
435 if (force_default || ka->sa.sa_handler != SIG_IGN)
436 ka->sa.sa_handler = SIG_DFL;
437 ka->sa.sa_flags = 0;
438 sigemptyset(&ka->sa.sa_mask);
439 ka++;
440 }
441}
442
443
444/* Notify the system that a driver wants to block all signals for this
445 * process, and wants to be notified if any signals at all were to be
446 * sent/acted upon. If the notifier routine returns non-zero, then the
447 * signal will be acted upon after all. If the notifier routine returns 0,
448 * then then signal will be blocked. Only one block per process is
449 * allowed. priv is a pointer to private data that the notifier routine
450 * can use to determine if the signal should be blocked or not. */
451
452void
453block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
454{
455 unsigned long flags;
456
457 spin_lock_irqsave(&current->sighand->siglock, flags);
458 current->notifier_mask = mask;
459 current->notifier_data = priv;
460 current->notifier = notifier;
461 spin_unlock_irqrestore(&current->sighand->siglock, flags);
462}
463
464/* Notify the system that blocking has ended. */
465
466void
467unblock_all_signals(void)
468{
469 unsigned long flags;
470
471 spin_lock_irqsave(&current->sighand->siglock, flags);
472 current->notifier = NULL;
473 current->notifier_data = NULL;
474 recalc_sigpending();
475 spin_unlock_irqrestore(&current->sighand->siglock, flags);
476}
477
478static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
479{
480 struct sigqueue *q, *first = NULL;
481 int still_pending = 0;
482
483 if (unlikely(!sigismember(&list->signal, sig)))
484 return 0;
485
486 /*
487 * Collect the siginfo appropriate to this signal. Check if
488 * there is another siginfo for the same signal.
489 */
490 list_for_each_entry(q, &list->list, list) {
491 if (q->info.si_signo == sig) {
492 if (first) {
493 still_pending = 1;
494 break;
495 }
496 first = q;
497 }
498 }
499 if (first) {
500 list_del_init(&first->list);
501 copy_siginfo(info, &first->info);
502 __sigqueue_free(first);
503 if (!still_pending)
504 sigdelset(&list->signal, sig);
505 } else {
506
507 /* Ok, it wasn't in the queue. This must be
508 a fast-pathed signal or we must have been
509 out of queue space. So zero out the info.
510 */
511 sigdelset(&list->signal, sig);
512 info->si_signo = sig;
513 info->si_errno = 0;
514 info->si_code = 0;
515 info->si_pid = 0;
516 info->si_uid = 0;
517 }
518 return 1;
519}
520
521static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
522 siginfo_t *info)
523{
524 int sig = 0;
525
c33880aa
KK
526 /* SIGKILL must have priority, otherwise it is quite easy
527 * to create an unkillable process, sending sig < SIGKILL
528 * to self */
529 if (unlikely(sigismember(&pending->signal, SIGKILL))) {
530 if (!sigismember(mask, SIGKILL))
531 sig = SIGKILL;
532 }
533
534 if (likely(!sig))
535 sig = next_signal(pending, mask);
1da177e4
LT
536 if (sig) {
537 if (current->notifier) {
538 if (sigismember(current->notifier_mask, sig)) {
539 if (!(current->notifier)(current->notifier_data)) {
540 clear_thread_flag(TIF_SIGPENDING);
541 return 0;
542 }
543 }
544 }
545
546 if (!collect_signal(sig, pending, info))
547 sig = 0;
548
549 }
550 recalc_sigpending();
551
552 return sig;
553}
554
555/*
556 * Dequeue a signal and return the element to the caller, which is
557 * expected to free it.
558 *
559 * All callers have to hold the siglock.
560 */
561int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
562{
563 int signr = __dequeue_signal(&tsk->pending, mask, info);
564 if (!signr)
565 signr = __dequeue_signal(&tsk->signal->shared_pending,
566 mask, info);
567 if (signr && unlikely(sig_kernel_stop(signr))) {
568 /*
569 * Set a marker that we have dequeued a stop signal. Our
570 * caller might release the siglock and then the pending
571 * stop signal it is about to process is no longer in the
572 * pending bitmasks, but must still be cleared by a SIGCONT
573 * (and overruled by a SIGKILL). So those cases clear this
574 * shared flag after we've set it. Note that this flag may
575 * remain set after the signal we return is ignored or
576 * handled. That doesn't matter because its only purpose
577 * is to alert stop-signal processing code when another
578 * processor has come along and cleared the flag.
579 */
580 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
581 }
582 if ( signr &&
583 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
584 info->si_sys_private){
585 /*
586 * Release the siglock to ensure proper locking order
587 * of timer locks outside of siglocks. Note, we leave
588 * irqs disabled here, since the posix-timers code is
589 * about to disable them again anyway.
590 */
591 spin_unlock(&tsk->sighand->siglock);
592 do_schedule_next_timer(info);
593 spin_lock(&tsk->sighand->siglock);
594 }
595 return signr;
596}
597
598/*
599 * Tell a process that it has a new active signal..
600 *
601 * NOTE! we rely on the previous spin_lock to
602 * lock interrupts for us! We can only be called with
603 * "siglock" held, and the local interrupt must
604 * have been disabled when that got acquired!
605 *
606 * No need to set need_resched since signal event passing
607 * goes through ->blocked
608 */
609void signal_wake_up(struct task_struct *t, int resume)
610{
611 unsigned int mask;
612
613 set_tsk_thread_flag(t, TIF_SIGPENDING);
614
615 /*
616 * For SIGKILL, we want to wake it up in the stopped/traced case.
617 * We don't check t->state here because there is a race with it
618 * executing another processor and just now entering stopped state.
619 * By using wake_up_state, we ensure the process will wake up and
620 * handle its death signal.
621 */
622 mask = TASK_INTERRUPTIBLE;
623 if (resume)
624 mask |= TASK_STOPPED | TASK_TRACED;
625 if (!wake_up_state(t, mask))
626 kick_process(t);
627}
628
629/*
630 * Remove signals in mask from the pending set and queue.
631 * Returns 1 if any signals were found.
632 *
633 * All callers must be holding the siglock.
634 */
635static int rm_from_queue(unsigned long mask, struct sigpending *s)
636{
637 struct sigqueue *q, *n;
638
639 if (!sigtestsetmask(&s->signal, mask))
640 return 0;
641
642 sigdelsetmask(&s->signal, mask);
643 list_for_each_entry_safe(q, n, &s->list, list) {
644 if (q->info.si_signo < SIGRTMIN &&
645 (mask & sigmask(q->info.si_signo))) {
646 list_del_init(&q->list);
647 __sigqueue_free(q);
648 }
649 }
650 return 1;
651}
652
653/*
654 * Bad permissions for sending the signal
655 */
656static int check_kill_permission(int sig, struct siginfo *info,
657 struct task_struct *t)
658{
659 int error = -EINVAL;
7ed20e1a 660 if (!valid_signal(sig))
1da177e4
LT
661 return error;
662 error = -EPERM;
663 if ((!info || ((unsigned long)info != 1 &&
664 (unsigned long)info != 2 && SI_FROMUSER(info)))
665 && ((sig != SIGCONT) ||
666 (current->signal->session != t->signal->session))
667 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
668 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
669 && !capable(CAP_KILL))
670 return error;
c2f0c7c3
SG
671
672 error = security_task_kill(t, info, sig);
673 if (!error)
674 audit_signal_info(sig, t); /* Let audit system see the signal */
675 return error;
1da177e4
LT
676}
677
678/* forward decl */
679static void do_notify_parent_cldstop(struct task_struct *tsk,
680 struct task_struct *parent,
681 int why);
682
683/*
684 * Handle magic process-wide effects of stop/continue signals.
685 * Unlike the signal actions, these happen immediately at signal-generation
686 * time regardless of blocking, ignoring, or handling. This does the
687 * actual continuing for SIGCONT, but not the actual stopping for stop
688 * signals. The process stop is done as a signal action for SIG_DFL.
689 */
690static void handle_stop_signal(int sig, struct task_struct *p)
691{
692 struct task_struct *t;
693
694 if (p->flags & SIGNAL_GROUP_EXIT)
695 /*
696 * The process is in the middle of dying already.
697 */
698 return;
699
700 if (sig_kernel_stop(sig)) {
701 /*
702 * This is a stop signal. Remove SIGCONT from all queues.
703 */
704 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
705 t = p;
706 do {
707 rm_from_queue(sigmask(SIGCONT), &t->pending);
708 t = next_thread(t);
709 } while (t != p);
710 } else if (sig == SIGCONT) {
711 /*
712 * Remove all stop signals from all queues,
713 * and wake all threads.
714 */
715 if (unlikely(p->signal->group_stop_count > 0)) {
716 /*
717 * There was a group stop in progress. We'll
718 * pretend it finished before we got here. We are
719 * obliged to report it to the parent: if the
720 * SIGSTOP happened "after" this SIGCONT, then it
721 * would have cleared this pending SIGCONT. If it
722 * happened "before" this SIGCONT, then the parent
723 * got the SIGCHLD about the stop finishing before
724 * the continue happened. We do the notification
725 * now, and it's as if the stop had finished and
726 * the SIGCHLD was pending on entry to this kill.
727 */
728 p->signal->group_stop_count = 0;
729 p->signal->flags = SIGNAL_STOP_CONTINUED;
730 spin_unlock(&p->sighand->siglock);
731 if (p->ptrace & PT_PTRACED)
732 do_notify_parent_cldstop(p, p->parent,
733 CLD_STOPPED);
734 else
735 do_notify_parent_cldstop(
736 p->group_leader,
737 p->group_leader->real_parent,
738 CLD_STOPPED);
739 spin_lock(&p->sighand->siglock);
740 }
741 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
742 t = p;
743 do {
744 unsigned int state;
745 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
746
747 /*
748 * If there is a handler for SIGCONT, we must make
749 * sure that no thread returns to user mode before
750 * we post the signal, in case it was the only
751 * thread eligible to run the signal handler--then
752 * it must not do anything between resuming and
753 * running the handler. With the TIF_SIGPENDING
754 * flag set, the thread will pause and acquire the
755 * siglock that we hold now and until we've queued
756 * the pending signal.
757 *
758 * Wake up the stopped thread _after_ setting
759 * TIF_SIGPENDING
760 */
761 state = TASK_STOPPED;
762 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
763 set_tsk_thread_flag(t, TIF_SIGPENDING);
764 state |= TASK_INTERRUPTIBLE;
765 }
766 wake_up_state(t, state);
767
768 t = next_thread(t);
769 } while (t != p);
770
771 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
772 /*
773 * We were in fact stopped, and are now continued.
774 * Notify the parent with CLD_CONTINUED.
775 */
776 p->signal->flags = SIGNAL_STOP_CONTINUED;
777 p->signal->group_exit_code = 0;
778 spin_unlock(&p->sighand->siglock);
779 if (p->ptrace & PT_PTRACED)
780 do_notify_parent_cldstop(p, p->parent,
781 CLD_CONTINUED);
782 else
783 do_notify_parent_cldstop(
784 p->group_leader,
785 p->group_leader->real_parent,
786 CLD_CONTINUED);
787 spin_lock(&p->sighand->siglock);
788 } else {
789 /*
790 * We are not stopped, but there could be a stop
791 * signal in the middle of being processed after
792 * being removed from the queue. Clear that too.
793 */
794 p->signal->flags = 0;
795 }
796 } else if (sig == SIGKILL) {
797 /*
798 * Make sure that any pending stop signal already dequeued
799 * is undone by the wakeup for SIGKILL.
800 */
801 p->signal->flags = 0;
802 }
803}
804
805static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
806 struct sigpending *signals)
807{
808 struct sigqueue * q = NULL;
809 int ret = 0;
810
811 /*
812 * fast-pathed signals for kernel-internal things like SIGSTOP
813 * or SIGKILL.
814 */
815 if ((unsigned long)info == 2)
816 goto out_set;
817
818 /* Real-time signals must be queued if sent by sigqueue, or
819 some other real-time mechanism. It is implementation
820 defined whether kill() does so. We attempt to do so, on
821 the principle of least surprise, but since kill is not
822 allowed to fail with EAGAIN when low on memory we just
823 make sure at least one signal gets delivered and don't
824 pass on the info struct. */
825
826 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
827 ((unsigned long) info < 2 ||
828 info->si_code >= 0)));
829 if (q) {
830 list_add_tail(&q->list, &signals->list);
831 switch ((unsigned long) info) {
832 case 0:
833 q->info.si_signo = sig;
834 q->info.si_errno = 0;
835 q->info.si_code = SI_USER;
836 q->info.si_pid = current->pid;
837 q->info.si_uid = current->uid;
838 break;
839 case 1:
840 q->info.si_signo = sig;
841 q->info.si_errno = 0;
842 q->info.si_code = SI_KERNEL;
843 q->info.si_pid = 0;
844 q->info.si_uid = 0;
845 break;
846 default:
847 copy_siginfo(&q->info, info);
848 break;
849 }
850 } else {
851 if (sig >= SIGRTMIN && info && (unsigned long)info != 1
852 && info->si_code != SI_USER)
853 /*
854 * Queue overflow, abort. We may abort if the signal was rt
855 * and sent by user using something other than kill().
856 */
857 return -EAGAIN;
858 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
859 /*
860 * Set up a return to indicate that we dropped
861 * the signal.
862 */
863 ret = info->si_sys_private;
864 }
865
866out_set:
867 sigaddset(&signals->signal, sig);
868 return ret;
869}
870
871#define LEGACY_QUEUE(sigptr, sig) \
872 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
873
874
875static int
876specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
877{
878 int ret = 0;
879
880 if (!irqs_disabled())
881 BUG();
882 assert_spin_locked(&t->sighand->siglock);
883
884 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
885 /*
886 * Set up a return to indicate that we dropped the signal.
887 */
888 ret = info->si_sys_private;
889
890 /* Short-circuit ignored signals. */
891 if (sig_ignored(t, sig))
892 goto out;
893
894 /* Support queueing exactly one non-rt signal, so that we
895 can get more detailed information about the cause of
896 the signal. */
897 if (LEGACY_QUEUE(&t->pending, sig))
898 goto out;
899
900 ret = send_signal(sig, info, t, &t->pending);
901 if (!ret && !sigismember(&t->blocked, sig))
902 signal_wake_up(t, sig == SIGKILL);
903out:
904 return ret;
905}
906
907/*
908 * Force a signal that the process can't ignore: if necessary
909 * we unblock the signal and change any SIG_IGN to SIG_DFL.
910 */
911
912int
913force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
914{
915 unsigned long int flags;
916 int ret;
917
918 spin_lock_irqsave(&t->sighand->siglock, flags);
919 if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
920 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
921 sigdelset(&t->blocked, sig);
922 recalc_sigpending_tsk(t);
923 }
924 ret = specific_send_sig_info(sig, info, t);
925 spin_unlock_irqrestore(&t->sighand->siglock, flags);
926
927 return ret;
928}
929
930void
931force_sig_specific(int sig, struct task_struct *t)
932{
933 unsigned long int flags;
934
935 spin_lock_irqsave(&t->sighand->siglock, flags);
936 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
937 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
938 sigdelset(&t->blocked, sig);
939 recalc_sigpending_tsk(t);
940 specific_send_sig_info(sig, (void *)2, t);
941 spin_unlock_irqrestore(&t->sighand->siglock, flags);
942}
943
944/*
945 * Test if P wants to take SIG. After we've checked all threads with this,
946 * it's equivalent to finding no threads not blocking SIG. Any threads not
947 * blocking SIG were ruled out because they are not running and already
948 * have pending signals. Such threads will dequeue from the shared queue
949 * as soon as they're available, so putting the signal on the shared queue
950 * will be equivalent to sending it to one such thread.
951 */
952#define wants_signal(sig, p, mask) \
953 (!sigismember(&(p)->blocked, sig) \
954 && !((p)->state & mask) \
955 && !((p)->flags & PF_EXITING) \
956 && (task_curr(p) || !signal_pending(p)))
957
958
959static void
960__group_complete_signal(int sig, struct task_struct *p)
961{
962 unsigned int mask;
963 struct task_struct *t;
964
965 /*
966 * Don't bother traced and stopped tasks (but
967 * SIGKILL will punch through that).
968 */
969 mask = TASK_STOPPED | TASK_TRACED;
970 if (sig == SIGKILL)
971 mask = 0;
972
973 /*
974 * Now find a thread we can wake up to take the signal off the queue.
975 *
976 * If the main thread wants the signal, it gets first crack.
977 * Probably the least surprising to the average bear.
978 */
979 if (wants_signal(sig, p, mask))
980 t = p;
981 else if (thread_group_empty(p))
982 /*
983 * There is just one thread and it does not need to be woken.
984 * It will dequeue unblocked signals before it runs again.
985 */
986 return;
987 else {
988 /*
989 * Otherwise try to find a suitable thread.
990 */
991 t = p->signal->curr_target;
992 if (t == NULL)
993 /* restart balancing at this thread */
994 t = p->signal->curr_target = p;
995 BUG_ON(t->tgid != p->tgid);
996
997 while (!wants_signal(sig, t, mask)) {
998 t = next_thread(t);
999 if (t == p->signal->curr_target)
1000 /*
1001 * No thread needs to be woken.
1002 * Any eligible threads will see
1003 * the signal in the queue soon.
1004 */
1005 return;
1006 }
1007 p->signal->curr_target = t;
1008 }
1009
1010 /*
1011 * Found a killable thread. If the signal will be fatal,
1012 * then start taking the whole group down immediately.
1013 */
1014 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1015 !sigismember(&t->real_blocked, sig) &&
1016 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1017 /*
1018 * This signal will be fatal to the whole group.
1019 */
1020 if (!sig_kernel_coredump(sig)) {
1021 /*
1022 * Start a group exit and wake everybody up.
1023 * This way we don't have other threads
1024 * running and doing things after a slower
1025 * thread has the fatal signal pending.
1026 */
1027 p->signal->flags = SIGNAL_GROUP_EXIT;
1028 p->signal->group_exit_code = sig;
1029 p->signal->group_stop_count = 0;
1030 t = p;
1031 do {
1032 sigaddset(&t->pending.signal, SIGKILL);
1033 signal_wake_up(t, 1);
1034 t = next_thread(t);
1035 } while (t != p);
1036 return;
1037 }
1038
1039 /*
1040 * There will be a core dump. We make all threads other
1041 * than the chosen one go into a group stop so that nothing
1042 * happens until it gets scheduled, takes the signal off
1043 * the shared queue, and does the core dump. This is a
1044 * little more complicated than strictly necessary, but it
1045 * keeps the signal state that winds up in the core dump
1046 * unchanged from the death state, e.g. which thread had
1047 * the core-dump signal unblocked.
1048 */
1049 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1050 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1051 p->signal->group_stop_count = 0;
1052 p->signal->group_exit_task = t;
1053 t = p;
1054 do {
1055 p->signal->group_stop_count++;
1056 signal_wake_up(t, 0);
1057 t = next_thread(t);
1058 } while (t != p);
1059 wake_up_process(p->signal->group_exit_task);
1060 return;
1061 }
1062
1063 /*
1064 * The signal is already in the shared-pending queue.
1065 * Tell the chosen thread to wake up and dequeue it.
1066 */
1067 signal_wake_up(t, sig == SIGKILL);
1068 return;
1069}
1070
1071int
1072__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1073{
1074 int ret = 0;
1075
1076 assert_spin_locked(&p->sighand->siglock);
1077 handle_stop_signal(sig, p);
1078
1079 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1080 /*
1081 * Set up a return to indicate that we dropped the signal.
1082 */
1083 ret = info->si_sys_private;
1084
1085 /* Short-circuit ignored signals. */
1086 if (sig_ignored(p, sig))
1087 return ret;
1088
1089 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1090 /* This is a non-RT signal and we already have one queued. */
1091 return ret;
1092
1093 /*
1094 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1095 * We always use the shared queue for process-wide signals,
1096 * to avoid several races.
1097 */
1098 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1099 if (unlikely(ret))
1100 return ret;
1101
1102 __group_complete_signal(sig, p);
1103 return 0;
1104}
1105
1106/*
1107 * Nuke all other threads in the group.
1108 */
1109void zap_other_threads(struct task_struct *p)
1110{
1111 struct task_struct *t;
1112
1113 p->signal->flags = SIGNAL_GROUP_EXIT;
1114 p->signal->group_stop_count = 0;
1115
1116 if (thread_group_empty(p))
1117 return;
1118
1119 for (t = next_thread(p); t != p; t = next_thread(t)) {
1120 /*
1121 * Don't bother with already dead threads
1122 */
1123 if (t->exit_state)
1124 continue;
1125
1126 /*
1127 * We don't want to notify the parent, since we are
1128 * killed as part of a thread group due to another
1129 * thread doing an execve() or similar. So set the
1130 * exit signal to -1 to allow immediate reaping of
1131 * the process. But don't detach the thread group
1132 * leader.
1133 */
1134 if (t != p->group_leader)
1135 t->exit_signal = -1;
1136
1137 sigaddset(&t->pending.signal, SIGKILL);
1138 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1139 signal_wake_up(t, 1);
1140 }
1141}
1142
1143/*
1144 * Must be called with the tasklist_lock held for reading!
1145 */
1146int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1147{
1148 unsigned long flags;
1149 int ret;
1150
1151 ret = check_kill_permission(sig, info, p);
1152 if (!ret && sig && p->sighand) {
1153 spin_lock_irqsave(&p->sighand->siglock, flags);
1154 ret = __group_send_sig_info(sig, info, p);
1155 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1156 }
1157
1158 return ret;
1159}
1160
1161/*
1162 * kill_pg_info() sends a signal to a process group: this is what the tty
1163 * control characters do (^C, ^Z etc)
1164 */
1165
1166int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1167{
1168 struct task_struct *p = NULL;
1169 int retval, success;
1170
1171 if (pgrp <= 0)
1172 return -EINVAL;
1173
1174 success = 0;
1175 retval = -ESRCH;
1176 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1177 int err = group_send_sig_info(sig, info, p);
1178 success |= !err;
1179 retval = err;
1180 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1181 return success ? 0 : retval;
1182}
1183
1184int
1185kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1186{
1187 int retval;
1188
1189 read_lock(&tasklist_lock);
1190 retval = __kill_pg_info(sig, info, pgrp);
1191 read_unlock(&tasklist_lock);
1192
1193 return retval;
1194}
1195
1196int
1197kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1198{
1199 int error;
1200 struct task_struct *p;
1201
1202 read_lock(&tasklist_lock);
1203 p = find_task_by_pid(pid);
1204 error = -ESRCH;
1205 if (p)
1206 error = group_send_sig_info(sig, info, p);
1207 read_unlock(&tasklist_lock);
1208 return error;
1209}
1210
1211
1212/*
1213 * kill_something_info() interprets pid in interesting ways just like kill(2).
1214 *
1215 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1216 * is probably wrong. Should make it like BSD or SYSV.
1217 */
1218
1219static int kill_something_info(int sig, struct siginfo *info, int pid)
1220{
1221 if (!pid) {
1222 return kill_pg_info(sig, info, process_group(current));
1223 } else if (pid == -1) {
1224 int retval = 0, count = 0;
1225 struct task_struct * p;
1226
1227 read_lock(&tasklist_lock);
1228 for_each_process(p) {
1229 if (p->pid > 1 && p->tgid != current->tgid) {
1230 int err = group_send_sig_info(sig, info, p);
1231 ++count;
1232 if (err != -EPERM)
1233 retval = err;
1234 }
1235 }
1236 read_unlock(&tasklist_lock);
1237 return count ? retval : -ESRCH;
1238 } else if (pid < 0) {
1239 return kill_pg_info(sig, info, -pid);
1240 } else {
1241 return kill_proc_info(sig, info, pid);
1242 }
1243}
1244
1245/*
1246 * These are for backward compatibility with the rest of the kernel source.
1247 */
1248
1249/*
1250 * These two are the most common entry points. They send a signal
1251 * just to the specific thread.
1252 */
1253int
1254send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1255{
1256 int ret;
1257 unsigned long flags;
1258
1259 /*
1260 * Make sure legacy kernel users don't send in bad values
1261 * (normal paths check this in check_kill_permission).
1262 */
7ed20e1a 1263 if (!valid_signal(sig))
1da177e4
LT
1264 return -EINVAL;
1265
1266 /*
1267 * We need the tasklist lock even for the specific
1268 * thread case (when we don't need to follow the group
1269 * lists) in order to avoid races with "p->sighand"
1270 * going away or changing from under us.
1271 */
1272 read_lock(&tasklist_lock);
1273 spin_lock_irqsave(&p->sighand->siglock, flags);
1274 ret = specific_send_sig_info(sig, info, p);
1275 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1276 read_unlock(&tasklist_lock);
1277 return ret;
1278}
1279
1280int
1281send_sig(int sig, struct task_struct *p, int priv)
1282{
1283 return send_sig_info(sig, (void*)(long)(priv != 0), p);
1284}
1285
1286/*
1287 * This is the entry point for "process-wide" signals.
1288 * They will go to an appropriate thread in the thread group.
1289 */
1290int
1291send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1292{
1293 int ret;
1294 read_lock(&tasklist_lock);
1295 ret = group_send_sig_info(sig, info, p);
1296 read_unlock(&tasklist_lock);
1297 return ret;
1298}
1299
1300void
1301force_sig(int sig, struct task_struct *p)
1302{
1303 force_sig_info(sig, (void*)1L, p);
1304}
1305
1306/*
1307 * When things go south during signal handling, we
1308 * will force a SIGSEGV. And if the signal that caused
1309 * the problem was already a SIGSEGV, we'll want to
1310 * make sure we don't even try to deliver the signal..
1311 */
1312int
1313force_sigsegv(int sig, struct task_struct *p)
1314{
1315 if (sig == SIGSEGV) {
1316 unsigned long flags;
1317 spin_lock_irqsave(&p->sighand->siglock, flags);
1318 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1319 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1320 }
1321 force_sig(SIGSEGV, p);
1322 return 0;
1323}
1324
1325int
1326kill_pg(pid_t pgrp, int sig, int priv)
1327{
1328 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1329}
1330
1331int
1332kill_proc(pid_t pid, int sig, int priv)
1333{
1334 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1335}
1336
1337/*
1338 * These functions support sending signals using preallocated sigqueue
1339 * structures. This is needed "because realtime applications cannot
1340 * afford to lose notifications of asynchronous events, like timer
1341 * expirations or I/O completions". In the case of Posix Timers
1342 * we allocate the sigqueue structure from the timer_create. If this
1343 * allocation fails we are able to report the failure to the application
1344 * with an EAGAIN error.
1345 */
1346
1347struct sigqueue *sigqueue_alloc(void)
1348{
1349 struct sigqueue *q;
1350
1351 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1352 q->flags |= SIGQUEUE_PREALLOC;
1353 return(q);
1354}
1355
1356void sigqueue_free(struct sigqueue *q)
1357{
1358 unsigned long flags;
1359 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1360 /*
1361 * If the signal is still pending remove it from the
1362 * pending queue.
1363 */
1364 if (unlikely(!list_empty(&q->list))) {
1365 read_lock(&tasklist_lock);
1366 spin_lock_irqsave(q->lock, flags);
1367 if (!list_empty(&q->list))
1368 list_del_init(&q->list);
1369 spin_unlock_irqrestore(q->lock, flags);
1370 read_unlock(&tasklist_lock);
1371 }
1372 q->flags &= ~SIGQUEUE_PREALLOC;
1373 __sigqueue_free(q);
1374}
1375
1376int
1377send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1378{
1379 unsigned long flags;
1380 int ret = 0;
1381
1382 /*
1383 * We need the tasklist lock even for the specific
1384 * thread case (when we don't need to follow the group
1385 * lists) in order to avoid races with "p->sighand"
1386 * going away or changing from under us.
1387 */
1388 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1389 read_lock(&tasklist_lock);
1390 spin_lock_irqsave(&p->sighand->siglock, flags);
1391
1392 if (unlikely(!list_empty(&q->list))) {
1393 /*
1394 * If an SI_TIMER entry is already queue just increment
1395 * the overrun count.
1396 */
1397 if (q->info.si_code != SI_TIMER)
1398 BUG();
1399 q->info.si_overrun++;
1400 goto out;
1401 }
1402 /* Short-circuit ignored signals. */
1403 if (sig_ignored(p, sig)) {
1404 ret = 1;
1405 goto out;
1406 }
1407
1408 q->lock = &p->sighand->siglock;
1409 list_add_tail(&q->list, &p->pending.list);
1410 sigaddset(&p->pending.signal, sig);
1411 if (!sigismember(&p->blocked, sig))
1412 signal_wake_up(p, sig == SIGKILL);
1413
1414out:
1415 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1416 read_unlock(&tasklist_lock);
1417 return(ret);
1418}
1419
1420int
1421send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1422{
1423 unsigned long flags;
1424 int ret = 0;
1425
1426 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1427 read_lock(&tasklist_lock);
1428 spin_lock_irqsave(&p->sighand->siglock, flags);
1429 handle_stop_signal(sig, p);
1430
1431 /* Short-circuit ignored signals. */
1432 if (sig_ignored(p, sig)) {
1433 ret = 1;
1434 goto out;
1435 }
1436
1437 if (unlikely(!list_empty(&q->list))) {
1438 /*
1439 * If an SI_TIMER entry is already queue just increment
1440 * the overrun count. Other uses should not try to
1441 * send the signal multiple times.
1442 */
1443 if (q->info.si_code != SI_TIMER)
1444 BUG();
1445 q->info.si_overrun++;
1446 goto out;
1447 }
1448
1449 /*
1450 * Put this signal on the shared-pending queue.
1451 * We always use the shared queue for process-wide signals,
1452 * to avoid several races.
1453 */
1454 q->lock = &p->sighand->siglock;
1455 list_add_tail(&q->list, &p->signal->shared_pending.list);
1456 sigaddset(&p->signal->shared_pending.signal, sig);
1457
1458 __group_complete_signal(sig, p);
1459out:
1460 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1461 read_unlock(&tasklist_lock);
1462 return(ret);
1463}
1464
1465/*
1466 * Wake up any threads in the parent blocked in wait* syscalls.
1467 */
1468static inline void __wake_up_parent(struct task_struct *p,
1469 struct task_struct *parent)
1470{
1471 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1472}
1473
1474/*
1475 * Let a parent know about the death of a child.
1476 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1477 */
1478
1479void do_notify_parent(struct task_struct *tsk, int sig)
1480{
1481 struct siginfo info;
1482 unsigned long flags;
1483 struct sighand_struct *psig;
1484
1485 BUG_ON(sig == -1);
1486
1487 /* do_notify_parent_cldstop should have been called instead. */
1488 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1489
1490 BUG_ON(!tsk->ptrace &&
1491 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1492
1493 info.si_signo = sig;
1494 info.si_errno = 0;
1495 info.si_pid = tsk->pid;
1496 info.si_uid = tsk->uid;
1497
1498 /* FIXME: find out whether or not this is supposed to be c*time. */
1499 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1500 tsk->signal->utime));
1501 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1502 tsk->signal->stime));
1503
1504 info.si_status = tsk->exit_code & 0x7f;
1505 if (tsk->exit_code & 0x80)
1506 info.si_code = CLD_DUMPED;
1507 else if (tsk->exit_code & 0x7f)
1508 info.si_code = CLD_KILLED;
1509 else {
1510 info.si_code = CLD_EXITED;
1511 info.si_status = tsk->exit_code >> 8;
1512 }
1513
1514 psig = tsk->parent->sighand;
1515 spin_lock_irqsave(&psig->siglock, flags);
1516 if (sig == SIGCHLD &&
1517 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1518 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1519 /*
1520 * We are exiting and our parent doesn't care. POSIX.1
1521 * defines special semantics for setting SIGCHLD to SIG_IGN
1522 * or setting the SA_NOCLDWAIT flag: we should be reaped
1523 * automatically and not left for our parent's wait4 call.
1524 * Rather than having the parent do it as a magic kind of
1525 * signal handler, we just set this to tell do_exit that we
1526 * can be cleaned up without becoming a zombie. Note that
1527 * we still call __wake_up_parent in this case, because a
1528 * blocked sys_wait4 might now return -ECHILD.
1529 *
1530 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1531 * is implementation-defined: we do (if you don't want
1532 * it, just use SIG_IGN instead).
1533 */
1534 tsk->exit_signal = -1;
1535 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1536 sig = 0;
1537 }
7ed20e1a 1538 if (valid_signal(sig) && sig > 0)
1da177e4
LT
1539 __group_send_sig_info(sig, &info, tsk->parent);
1540 __wake_up_parent(tsk, tsk->parent);
1541 spin_unlock_irqrestore(&psig->siglock, flags);
1542}
1543
1544static void
1545do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
1546 int why)
1547{
1548 struct siginfo info;
1549 unsigned long flags;
1550 struct sighand_struct *sighand;
1551
1552 info.si_signo = SIGCHLD;
1553 info.si_errno = 0;
1554 info.si_pid = tsk->pid;
1555 info.si_uid = tsk->uid;
1556
1557 /* FIXME: find out whether or not this is supposed to be c*time. */
1558 info.si_utime = cputime_to_jiffies(tsk->utime);
1559 info.si_stime = cputime_to_jiffies(tsk->stime);
1560
1561 info.si_code = why;
1562 switch (why) {
1563 case CLD_CONTINUED:
1564 info.si_status = SIGCONT;
1565 break;
1566 case CLD_STOPPED:
1567 info.si_status = tsk->signal->group_exit_code & 0x7f;
1568 break;
1569 case CLD_TRAPPED:
1570 info.si_status = tsk->exit_code & 0x7f;
1571 break;
1572 default:
1573 BUG();
1574 }
1575
1576 sighand = parent->sighand;
1577 spin_lock_irqsave(&sighand->siglock, flags);
1578 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1579 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1580 __group_send_sig_info(SIGCHLD, &info, parent);
1581 /*
1582 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1583 */
1584 __wake_up_parent(tsk, parent);
1585 spin_unlock_irqrestore(&sighand->siglock, flags);
1586}
1587
1588/*
1589 * This must be called with current->sighand->siglock held.
1590 *
1591 * This should be the path for all ptrace stops.
1592 * We always set current->last_siginfo while stopped here.
1593 * That makes it a way to test a stopped process for
1594 * being ptrace-stopped vs being job-control-stopped.
1595 *
1596 * If we actually decide not to stop at all because the tracer is gone,
1597 * we leave nostop_code in current->exit_code.
1598 */
1599static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1600{
1601 /*
1602 * If there is a group stop in progress,
1603 * we must participate in the bookkeeping.
1604 */
1605 if (current->signal->group_stop_count > 0)
1606 --current->signal->group_stop_count;
1607
1608 current->last_siginfo = info;
1609 current->exit_code = exit_code;
1610
1611 /* Let the debugger run. */
1612 set_current_state(TASK_TRACED);
1613 spin_unlock_irq(&current->sighand->siglock);
1614 read_lock(&tasklist_lock);
1615 if (likely(current->ptrace & PT_PTRACED) &&
1616 likely(current->parent != current->real_parent ||
1617 !(current->ptrace & PT_ATTACHED)) &&
1618 (likely(current->parent->signal != current->signal) ||
1619 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1620 do_notify_parent_cldstop(current, current->parent,
1621 CLD_TRAPPED);
1622 read_unlock(&tasklist_lock);
1623 schedule();
1624 } else {
1625 /*
1626 * By the time we got the lock, our tracer went away.
1627 * Don't stop here.
1628 */
1629 read_unlock(&tasklist_lock);
1630 set_current_state(TASK_RUNNING);
1631 current->exit_code = nostop_code;
1632 }
1633
1634 /*
1635 * We are back. Now reacquire the siglock before touching
1636 * last_siginfo, so that we are sure to have synchronized with
1637 * any signal-sending on another CPU that wants to examine it.
1638 */
1639 spin_lock_irq(&current->sighand->siglock);
1640 current->last_siginfo = NULL;
1641
1642 /*
1643 * Queued signals ignored us while we were stopped for tracing.
1644 * So check for any that we should take before resuming user mode.
1645 */
1646 recalc_sigpending();
1647}
1648
1649void ptrace_notify(int exit_code)
1650{
1651 siginfo_t info;
1652
1653 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1654
1655 memset(&info, 0, sizeof info);
1656 info.si_signo = SIGTRAP;
1657 info.si_code = exit_code;
1658 info.si_pid = current->pid;
1659 info.si_uid = current->uid;
1660
1661 /* Let the debugger run. */
1662 spin_lock_irq(&current->sighand->siglock);
1663 ptrace_stop(exit_code, 0, &info);
1664 spin_unlock_irq(&current->sighand->siglock);
1665}
1666
1da177e4
LT
1667static void
1668finish_stop(int stop_count)
1669{
1670 /*
1671 * If there are no other threads in the group, or if there is
1672 * a group stop in progress and we are the last to stop,
1673 * report to the parent. When ptraced, every thread reports itself.
1674 */
1675 if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
1676 read_lock(&tasklist_lock);
1677 do_notify_parent_cldstop(current, current->parent,
1678 CLD_STOPPED);
1679 read_unlock(&tasklist_lock);
1680 }
1681 else if (stop_count == 0) {
1682 read_lock(&tasklist_lock);
1683 do_notify_parent_cldstop(current->group_leader,
1684 current->group_leader->real_parent,
1685 CLD_STOPPED);
1686 read_unlock(&tasklist_lock);
1687 }
1688
1689 schedule();
1690 /*
1691 * Now we don't run again until continued.
1692 */
1693 current->exit_code = 0;
1694}
1695
1696/*
1697 * This performs the stopping for SIGSTOP and other stop signals.
1698 * We have to stop all threads in the thread group.
1699 * Returns nonzero if we've actually stopped and released the siglock.
1700 * Returns zero if we didn't stop and still hold the siglock.
1701 */
1702static int
1703do_signal_stop(int signr)
1704{
1705 struct signal_struct *sig = current->signal;
1706 struct sighand_struct *sighand = current->sighand;
1707 int stop_count = -1;
1708
1709 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1710 return 0;
1711
1712 if (sig->group_stop_count > 0) {
1713 /*
1714 * There is a group stop in progress. We don't need to
1715 * start another one.
1716 */
1717 signr = sig->group_exit_code;
1718 stop_count = --sig->group_stop_count;
1719 current->exit_code = signr;
1720 set_current_state(TASK_STOPPED);
1721 if (stop_count == 0)
1722 sig->flags = SIGNAL_STOP_STOPPED;
1723 spin_unlock_irq(&sighand->siglock);
1724 }
1725 else if (thread_group_empty(current)) {
1726 /*
1727 * Lock must be held through transition to stopped state.
1728 */
1729 current->exit_code = current->signal->group_exit_code = signr;
1730 set_current_state(TASK_STOPPED);
1731 sig->flags = SIGNAL_STOP_STOPPED;
1732 spin_unlock_irq(&sighand->siglock);
1733 }
1734 else {
1735 /*
1736 * There is no group stop already in progress.
1737 * We must initiate one now, but that requires
1738 * dropping siglock to get both the tasklist lock
1739 * and siglock again in the proper order. Note that
1740 * this allows an intervening SIGCONT to be posted.
1741 * We need to check for that and bail out if necessary.
1742 */
1743 struct task_struct *t;
1744
1745 spin_unlock_irq(&sighand->siglock);
1746
1747 /* signals can be posted during this window */
1748
1749 read_lock(&tasklist_lock);
1750 spin_lock_irq(&sighand->siglock);
1751
1752 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1753 /*
1754 * Another stop or continue happened while we
1755 * didn't have the lock. We can just swallow this
1756 * signal now. If we raced with a SIGCONT, that
1757 * should have just cleared it now. If we raced
1758 * with another processor delivering a stop signal,
1759 * then the SIGCONT that wakes us up should clear it.
1760 */
1761 read_unlock(&tasklist_lock);
1762 return 0;
1763 }
1764
1765 if (sig->group_stop_count == 0) {
1766 sig->group_exit_code = signr;
1767 stop_count = 0;
1768 for (t = next_thread(current); t != current;
1769 t = next_thread(t))
1770 /*
1771 * Setting state to TASK_STOPPED for a group
1772 * stop is always done with the siglock held,
1773 * so this check has no races.
1774 */
1775 if (t->state < TASK_STOPPED) {
1776 stop_count++;
1777 signal_wake_up(t, 0);
1778 }
1779 sig->group_stop_count = stop_count;
1780 }
1781 else {
1782 /* A race with another thread while unlocked. */
1783 signr = sig->group_exit_code;
1784 stop_count = --sig->group_stop_count;
1785 }
1786
1787 current->exit_code = signr;
1788 set_current_state(TASK_STOPPED);
1789 if (stop_count == 0)
1790 sig->flags = SIGNAL_STOP_STOPPED;
1791
1792 spin_unlock_irq(&sighand->siglock);
1793 read_unlock(&tasklist_lock);
1794 }
1795
1796 finish_stop(stop_count);
1797 return 1;
1798}
1799
1800/*
1801 * Do appropriate magic when group_stop_count > 0.
1802 * We return nonzero if we stopped, after releasing the siglock.
1803 * We return zero if we still hold the siglock and should look
1804 * for another signal without checking group_stop_count again.
1805 */
1806static inline int handle_group_stop(void)
1807{
1808 int stop_count;
1809
1810 if (current->signal->group_exit_task == current) {
1811 /*
1812 * Group stop is so we can do a core dump,
1813 * We are the initiating thread, so get on with it.
1814 */
1815 current->signal->group_exit_task = NULL;
1816 return 0;
1817 }
1818
1819 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1820 /*
1821 * Group stop is so another thread can do a core dump,
1822 * or else we are racing against a death signal.
1823 * Just punt the stop so we can get the next signal.
1824 */
1825 return 0;
1826
1827 /*
1828 * There is a group stop in progress. We stop
1829 * without any associated signal being in our queue.
1830 */
1831 stop_count = --current->signal->group_stop_count;
1832 if (stop_count == 0)
1833 current->signal->flags = SIGNAL_STOP_STOPPED;
1834 current->exit_code = current->signal->group_exit_code;
1835 set_current_state(TASK_STOPPED);
1836 spin_unlock_irq(&current->sighand->siglock);
1837 finish_stop(stop_count);
1838 return 1;
1839}
1840
1841int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1842 struct pt_regs *regs, void *cookie)
1843{
1844 sigset_t *mask = &current->blocked;
1845 int signr = 0;
1846
1847relock:
1848 spin_lock_irq(&current->sighand->siglock);
1849 for (;;) {
1850 struct k_sigaction *ka;
1851
1852 if (unlikely(current->signal->group_stop_count > 0) &&
1853 handle_group_stop())
1854 goto relock;
1855
1856 signr = dequeue_signal(current, mask, info);
1857
1858 if (!signr)
1859 break; /* will return 0 */
1860
1861 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1862 ptrace_signal_deliver(regs, cookie);
1863
1864 /* Let the debugger run. */
1865 ptrace_stop(signr, signr, info);
1866
1867 /* We're back. Did the debugger cancel the sig? */
1868 signr = current->exit_code;
1869 if (signr == 0)
1870 continue;
1871
1872 current->exit_code = 0;
1873
1874 /* Update the siginfo structure if the signal has
1875 changed. If the debugger wanted something
1876 specific in the siginfo structure then it should
1877 have updated *info via PTRACE_SETSIGINFO. */
1878 if (signr != info->si_signo) {
1879 info->si_signo = signr;
1880 info->si_errno = 0;
1881 info->si_code = SI_USER;
1882 info->si_pid = current->parent->pid;
1883 info->si_uid = current->parent->uid;
1884 }
1885
1886 /* If the (new) signal is now blocked, requeue it. */
1887 if (sigismember(&current->blocked, signr)) {
1888 specific_send_sig_info(signr, info, current);
1889 continue;
1890 }
1891 }
1892
1893 ka = &current->sighand->action[signr-1];
1894 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1895 continue;
1896 if (ka->sa.sa_handler != SIG_DFL) {
1897 /* Run the handler. */
1898 *return_ka = *ka;
1899
1900 if (ka->sa.sa_flags & SA_ONESHOT)
1901 ka->sa.sa_handler = SIG_DFL;
1902
1903 break; /* will return non-zero "signr" value */
1904 }
1905
1906 /*
1907 * Now we are doing the default action for this signal.
1908 */
1909 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1910 continue;
1911
1912 /* Init gets no signals it doesn't want. */
1913 if (current->pid == 1)
1914 continue;
1915
1916 if (sig_kernel_stop(signr)) {
1917 /*
1918 * The default action is to stop all threads in
1919 * the thread group. The job control signals
1920 * do nothing in an orphaned pgrp, but SIGSTOP
1921 * always works. Note that siglock needs to be
1922 * dropped during the call to is_orphaned_pgrp()
1923 * because of lock ordering with tasklist_lock.
1924 * This allows an intervening SIGCONT to be posted.
1925 * We need to check for that and bail out if necessary.
1926 */
1927 if (signr != SIGSTOP) {
1928 spin_unlock_irq(&current->sighand->siglock);
1929
1930 /* signals can be posted during this window */
1931
1932 if (is_orphaned_pgrp(process_group(current)))
1933 goto relock;
1934
1935 spin_lock_irq(&current->sighand->siglock);
1936 }
1937
1938 if (likely(do_signal_stop(signr))) {
1939 /* It released the siglock. */
1940 goto relock;
1941 }
1942
1943 /*
1944 * We didn't actually stop, due to a race
1945 * with SIGCONT or something like that.
1946 */
1947 continue;
1948 }
1949
1950 spin_unlock_irq(&current->sighand->siglock);
1951
1952 /*
1953 * Anything else is fatal, maybe with a core dump.
1954 */
1955 current->flags |= PF_SIGNALED;
1956 if (sig_kernel_coredump(signr)) {
1957 /*
1958 * If it was able to dump core, this kills all
1959 * other threads in the group and synchronizes with
1960 * their demise. If we lost the race with another
1961 * thread getting here, it set group_exit_code
1962 * first and our do_group_exit call below will use
1963 * that value and ignore the one we pass it.
1964 */
1965 do_coredump((long)signr, signr, regs);
1966 }
1967
1968 /*
1969 * Death signals, no core dump.
1970 */
1971 do_group_exit(signr);
1972 /* NOTREACHED */
1973 }
1974 spin_unlock_irq(&current->sighand->siglock);
1975 return signr;
1976}
1977
1da177e4
LT
1978EXPORT_SYMBOL(recalc_sigpending);
1979EXPORT_SYMBOL_GPL(dequeue_signal);
1980EXPORT_SYMBOL(flush_signals);
1981EXPORT_SYMBOL(force_sig);
1982EXPORT_SYMBOL(kill_pg);
1983EXPORT_SYMBOL(kill_proc);
1984EXPORT_SYMBOL(ptrace_notify);
1985EXPORT_SYMBOL(send_sig);
1986EXPORT_SYMBOL(send_sig_info);
1987EXPORT_SYMBOL(sigprocmask);
1988EXPORT_SYMBOL(block_all_signals);
1989EXPORT_SYMBOL(unblock_all_signals);
1990
1991
1992/*
1993 * System call entry points.
1994 */
1995
1996asmlinkage long sys_restart_syscall(void)
1997{
1998 struct restart_block *restart = &current_thread_info()->restart_block;
1999 return restart->fn(restart);
2000}
2001
2002long do_no_restart_syscall(struct restart_block *param)
2003{
2004 return -EINTR;
2005}
2006
2007/*
2008 * We don't need to get the kernel lock - this is all local to this
2009 * particular thread.. (and that's good, because this is _heavily_
2010 * used by various programs)
2011 */
2012
2013/*
2014 * This is also useful for kernel threads that want to temporarily
2015 * (or permanently) block certain signals.
2016 *
2017 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2018 * interface happily blocks "unblockable" signals like SIGKILL
2019 * and friends.
2020 */
2021int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2022{
2023 int error;
2024 sigset_t old_block;
2025
2026 spin_lock_irq(&current->sighand->siglock);
2027 old_block = current->blocked;
2028 error = 0;
2029 switch (how) {
2030 case SIG_BLOCK:
2031 sigorsets(&current->blocked, &current->blocked, set);
2032 break;
2033 case SIG_UNBLOCK:
2034 signandsets(&current->blocked, &current->blocked, set);
2035 break;
2036 case SIG_SETMASK:
2037 current->blocked = *set;
2038 break;
2039 default:
2040 error = -EINVAL;
2041 }
2042 recalc_sigpending();
2043 spin_unlock_irq(&current->sighand->siglock);
2044 if (oldset)
2045 *oldset = old_block;
2046 return error;
2047}
2048
2049asmlinkage long
2050sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2051{
2052 int error = -EINVAL;
2053 sigset_t old_set, new_set;
2054
2055 /* XXX: Don't preclude handling different sized sigset_t's. */
2056 if (sigsetsize != sizeof(sigset_t))
2057 goto out;
2058
2059 if (set) {
2060 error = -EFAULT;
2061 if (copy_from_user(&new_set, set, sizeof(*set)))
2062 goto out;
2063 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2064
2065 error = sigprocmask(how, &new_set, &old_set);
2066 if (error)
2067 goto out;
2068 if (oset)
2069 goto set_old;
2070 } else if (oset) {
2071 spin_lock_irq(&current->sighand->siglock);
2072 old_set = current->blocked;
2073 spin_unlock_irq(&current->sighand->siglock);
2074
2075 set_old:
2076 error = -EFAULT;
2077 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2078 goto out;
2079 }
2080 error = 0;
2081out:
2082 return error;
2083}
2084
2085long do_sigpending(void __user *set, unsigned long sigsetsize)
2086{
2087 long error = -EINVAL;
2088 sigset_t pending;
2089
2090 if (sigsetsize > sizeof(sigset_t))
2091 goto out;
2092
2093 spin_lock_irq(&current->sighand->siglock);
2094 sigorsets(&pending, &current->pending.signal,
2095 &current->signal->shared_pending.signal);
2096 spin_unlock_irq(&current->sighand->siglock);
2097
2098 /* Outside the lock because only this thread touches it. */
2099 sigandsets(&pending, &current->blocked, &pending);
2100
2101 error = -EFAULT;
2102 if (!copy_to_user(set, &pending, sigsetsize))
2103 error = 0;
2104
2105out:
2106 return error;
2107}
2108
2109asmlinkage long
2110sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2111{
2112 return do_sigpending(set, sigsetsize);
2113}
2114
2115#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2116
2117int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2118{
2119 int err;
2120
2121 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2122 return -EFAULT;
2123 if (from->si_code < 0)
2124 return __copy_to_user(to, from, sizeof(siginfo_t))
2125 ? -EFAULT : 0;
2126 /*
2127 * If you change siginfo_t structure, please be sure
2128 * this code is fixed accordingly.
2129 * It should never copy any pad contained in the structure
2130 * to avoid security leaks, but must copy the generic
2131 * 3 ints plus the relevant union member.
2132 */
2133 err = __put_user(from->si_signo, &to->si_signo);
2134 err |= __put_user(from->si_errno, &to->si_errno);
2135 err |= __put_user((short)from->si_code, &to->si_code);
2136 switch (from->si_code & __SI_MASK) {
2137 case __SI_KILL:
2138 err |= __put_user(from->si_pid, &to->si_pid);
2139 err |= __put_user(from->si_uid, &to->si_uid);
2140 break;
2141 case __SI_TIMER:
2142 err |= __put_user(from->si_tid, &to->si_tid);
2143 err |= __put_user(from->si_overrun, &to->si_overrun);
2144 err |= __put_user(from->si_ptr, &to->si_ptr);
2145 break;
2146 case __SI_POLL:
2147 err |= __put_user(from->si_band, &to->si_band);
2148 err |= __put_user(from->si_fd, &to->si_fd);
2149 break;
2150 case __SI_FAULT:
2151 err |= __put_user(from->si_addr, &to->si_addr);
2152#ifdef __ARCH_SI_TRAPNO
2153 err |= __put_user(from->si_trapno, &to->si_trapno);
2154#endif
2155 break;
2156 case __SI_CHLD:
2157 err |= __put_user(from->si_pid, &to->si_pid);
2158 err |= __put_user(from->si_uid, &to->si_uid);
2159 err |= __put_user(from->si_status, &to->si_status);
2160 err |= __put_user(from->si_utime, &to->si_utime);
2161 err |= __put_user(from->si_stime, &to->si_stime);
2162 break;
2163 case __SI_RT: /* This is not generated by the kernel as of now. */
2164 case __SI_MESGQ: /* But this is */
2165 err |= __put_user(from->si_pid, &to->si_pid);
2166 err |= __put_user(from->si_uid, &to->si_uid);
2167 err |= __put_user(from->si_ptr, &to->si_ptr);
2168 break;
2169 default: /* this is just in case for now ... */
2170 err |= __put_user(from->si_pid, &to->si_pid);
2171 err |= __put_user(from->si_uid, &to->si_uid);
2172 break;
2173 }
2174 return err;
2175}
2176
2177#endif
2178
2179asmlinkage long
2180sys_rt_sigtimedwait(const sigset_t __user *uthese,
2181 siginfo_t __user *uinfo,
2182 const struct timespec __user *uts,
2183 size_t sigsetsize)
2184{
2185 int ret, sig;
2186 sigset_t these;
2187 struct timespec ts;
2188 siginfo_t info;
2189 long timeout = 0;
2190
2191 /* XXX: Don't preclude handling different sized sigset_t's. */
2192 if (sigsetsize != sizeof(sigset_t))
2193 return -EINVAL;
2194
2195 if (copy_from_user(&these, uthese, sizeof(these)))
2196 return -EFAULT;
2197
2198 /*
2199 * Invert the set of allowed signals to get those we
2200 * want to block.
2201 */
2202 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2203 signotset(&these);
2204
2205 if (uts) {
2206 if (copy_from_user(&ts, uts, sizeof(ts)))
2207 return -EFAULT;
2208 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2209 || ts.tv_sec < 0)
2210 return -EINVAL;
2211 }
2212
2213 spin_lock_irq(&current->sighand->siglock);
2214 sig = dequeue_signal(current, &these, &info);
2215 if (!sig) {
2216 timeout = MAX_SCHEDULE_TIMEOUT;
2217 if (uts)
2218 timeout = (timespec_to_jiffies(&ts)
2219 + (ts.tv_sec || ts.tv_nsec));
2220
2221 if (timeout) {
2222 /* None ready -- temporarily unblock those we're
2223 * interested while we are sleeping in so that we'll
2224 * be awakened when they arrive. */
2225 current->real_blocked = current->blocked;
2226 sigandsets(&current->blocked, &current->blocked, &these);
2227 recalc_sigpending();
2228 spin_unlock_irq(&current->sighand->siglock);
2229
2230 current->state = TASK_INTERRUPTIBLE;
2231 timeout = schedule_timeout(timeout);
2232
2233 if (current->flags & PF_FREEZE)
2234 refrigerator(PF_FREEZE);
2235 spin_lock_irq(&current->sighand->siglock);
2236 sig = dequeue_signal(current, &these, &info);
2237 current->blocked = current->real_blocked;
2238 siginitset(&current->real_blocked, 0);
2239 recalc_sigpending();
2240 }
2241 }
2242 spin_unlock_irq(&current->sighand->siglock);
2243
2244 if (sig) {
2245 ret = sig;
2246 if (uinfo) {
2247 if (copy_siginfo_to_user(uinfo, &info))
2248 ret = -EFAULT;
2249 }
2250 } else {
2251 ret = -EAGAIN;
2252 if (timeout)
2253 ret = -EINTR;
2254 }
2255
2256 return ret;
2257}
2258
2259asmlinkage long
2260sys_kill(int pid, int sig)
2261{
2262 struct siginfo info;
2263
2264 info.si_signo = sig;
2265 info.si_errno = 0;
2266 info.si_code = SI_USER;
2267 info.si_pid = current->tgid;
2268 info.si_uid = current->uid;
2269
2270 return kill_something_info(sig, &info, pid);
2271}
2272
2273/**
2274 * sys_tgkill - send signal to one specific thread
2275 * @tgid: the thread group ID of the thread
2276 * @pid: the PID of the thread
2277 * @sig: signal to be sent
2278 *
2279 * This syscall also checks the tgid and returns -ESRCH even if the PID
2280 * exists but it's not belonging to the target process anymore. This
2281 * method solves the problem of threads exiting and PIDs getting reused.
2282 */
2283asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2284{
2285 struct siginfo info;
2286 int error;
2287 struct task_struct *p;
2288
2289 /* This is only valid for single tasks */
2290 if (pid <= 0 || tgid <= 0)
2291 return -EINVAL;
2292
2293 info.si_signo = sig;
2294 info.si_errno = 0;
2295 info.si_code = SI_TKILL;
2296 info.si_pid = current->tgid;
2297 info.si_uid = current->uid;
2298
2299 read_lock(&tasklist_lock);
2300 p = find_task_by_pid(pid);
2301 error = -ESRCH;
2302 if (p && (p->tgid == tgid)) {
2303 error = check_kill_permission(sig, &info, p);
2304 /*
2305 * The null signal is a permissions and process existence
2306 * probe. No signal is actually delivered.
2307 */
2308 if (!error && sig && p->sighand) {
2309 spin_lock_irq(&p->sighand->siglock);
2310 handle_stop_signal(sig, p);
2311 error = specific_send_sig_info(sig, &info, p);
2312 spin_unlock_irq(&p->sighand->siglock);
2313 }
2314 }
2315 read_unlock(&tasklist_lock);
2316 return error;
2317}
2318
2319/*
2320 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2321 */
2322asmlinkage long
2323sys_tkill(int pid, int sig)
2324{
2325 struct siginfo info;
2326 int error;
2327 struct task_struct *p;
2328
2329 /* This is only valid for single tasks */
2330 if (pid <= 0)
2331 return -EINVAL;
2332
2333 info.si_signo = sig;
2334 info.si_errno = 0;
2335 info.si_code = SI_TKILL;
2336 info.si_pid = current->tgid;
2337 info.si_uid = current->uid;
2338
2339 read_lock(&tasklist_lock);
2340 p = find_task_by_pid(pid);
2341 error = -ESRCH;
2342 if (p) {
2343 error = check_kill_permission(sig, &info, p);
2344 /*
2345 * The null signal is a permissions and process existence
2346 * probe. No signal is actually delivered.
2347 */
2348 if (!error && sig && p->sighand) {
2349 spin_lock_irq(&p->sighand->siglock);
2350 handle_stop_signal(sig, p);
2351 error = specific_send_sig_info(sig, &info, p);
2352 spin_unlock_irq(&p->sighand->siglock);
2353 }
2354 }
2355 read_unlock(&tasklist_lock);
2356 return error;
2357}
2358
2359asmlinkage long
2360sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2361{
2362 siginfo_t info;
2363
2364 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2365 return -EFAULT;
2366
2367 /* Not even root can pretend to send signals from the kernel.
2368 Nor can they impersonate a kill(), which adds source info. */
2369 if (info.si_code >= 0)
2370 return -EPERM;
2371 info.si_signo = sig;
2372
2373 /* POSIX.1b doesn't mention process groups. */
2374 return kill_proc_info(sig, &info, pid);
2375}
2376
2377int
2378do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2379{
2380 struct k_sigaction *k;
2381
7ed20e1a 2382 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2383 return -EINVAL;
2384
2385 k = &current->sighand->action[sig-1];
2386
2387 spin_lock_irq(&current->sighand->siglock);
2388 if (signal_pending(current)) {
2389 /*
2390 * If there might be a fatal signal pending on multiple
2391 * threads, make sure we take it before changing the action.
2392 */
2393 spin_unlock_irq(&current->sighand->siglock);
2394 return -ERESTARTNOINTR;
2395 }
2396
2397 if (oact)
2398 *oact = *k;
2399
2400 if (act) {
2401 /*
2402 * POSIX 3.3.1.3:
2403 * "Setting a signal action to SIG_IGN for a signal that is
2404 * pending shall cause the pending signal to be discarded,
2405 * whether or not it is blocked."
2406 *
2407 * "Setting a signal action to SIG_DFL for a signal that is
2408 * pending and whose default action is to ignore the signal
2409 * (for example, SIGCHLD), shall cause the pending signal to
2410 * be discarded, whether or not it is blocked"
2411 */
2412 if (act->sa.sa_handler == SIG_IGN ||
2413 (act->sa.sa_handler == SIG_DFL &&
2414 sig_kernel_ignore(sig))) {
2415 /*
2416 * This is a fairly rare case, so we only take the
2417 * tasklist_lock once we're sure we'll need it.
2418 * Now we must do this little unlock and relock
2419 * dance to maintain the lock hierarchy.
2420 */
2421 struct task_struct *t = current;
2422 spin_unlock_irq(&t->sighand->siglock);
2423 read_lock(&tasklist_lock);
2424 spin_lock_irq(&t->sighand->siglock);
2425 *k = *act;
2426 sigdelsetmask(&k->sa.sa_mask,
2427 sigmask(SIGKILL) | sigmask(SIGSTOP));
2428 rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2429 do {
2430 rm_from_queue(sigmask(sig), &t->pending);
2431 recalc_sigpending_tsk(t);
2432 t = next_thread(t);
2433 } while (t != current);
2434 spin_unlock_irq(&current->sighand->siglock);
2435 read_unlock(&tasklist_lock);
2436 return 0;
2437 }
2438
2439 *k = *act;
2440 sigdelsetmask(&k->sa.sa_mask,
2441 sigmask(SIGKILL) | sigmask(SIGSTOP));
2442 }
2443
2444 spin_unlock_irq(&current->sighand->siglock);
2445 return 0;
2446}
2447
2448int
2449do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2450{
2451 stack_t oss;
2452 int error;
2453
2454 if (uoss) {
2455 oss.ss_sp = (void __user *) current->sas_ss_sp;
2456 oss.ss_size = current->sas_ss_size;
2457 oss.ss_flags = sas_ss_flags(sp);
2458 }
2459
2460 if (uss) {
2461 void __user *ss_sp;
2462 size_t ss_size;
2463 int ss_flags;
2464
2465 error = -EFAULT;
2466 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2467 || __get_user(ss_sp, &uss->ss_sp)
2468 || __get_user(ss_flags, &uss->ss_flags)
2469 || __get_user(ss_size, &uss->ss_size))
2470 goto out;
2471
2472 error = -EPERM;
2473 if (on_sig_stack(sp))
2474 goto out;
2475
2476 error = -EINVAL;
2477 /*
2478 *
2479 * Note - this code used to test ss_flags incorrectly
2480 * old code may have been written using ss_flags==0
2481 * to mean ss_flags==SS_ONSTACK (as this was the only
2482 * way that worked) - this fix preserves that older
2483 * mechanism
2484 */
2485 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2486 goto out;
2487
2488 if (ss_flags == SS_DISABLE) {
2489 ss_size = 0;
2490 ss_sp = NULL;
2491 } else {
2492 error = -ENOMEM;
2493 if (ss_size < MINSIGSTKSZ)
2494 goto out;
2495 }
2496
2497 current->sas_ss_sp = (unsigned long) ss_sp;
2498 current->sas_ss_size = ss_size;
2499 }
2500
2501 if (uoss) {
2502 error = -EFAULT;
2503 if (copy_to_user(uoss, &oss, sizeof(oss)))
2504 goto out;
2505 }
2506
2507 error = 0;
2508out:
2509 return error;
2510}
2511
2512#ifdef __ARCH_WANT_SYS_SIGPENDING
2513
2514asmlinkage long
2515sys_sigpending(old_sigset_t __user *set)
2516{
2517 return do_sigpending(set, sizeof(*set));
2518}
2519
2520#endif
2521
2522#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2523/* Some platforms have their own version with special arguments others
2524 support only sys_rt_sigprocmask. */
2525
2526asmlinkage long
2527sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2528{
2529 int error;
2530 old_sigset_t old_set, new_set;
2531
2532 if (set) {
2533 error = -EFAULT;
2534 if (copy_from_user(&new_set, set, sizeof(*set)))
2535 goto out;
2536 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2537
2538 spin_lock_irq(&current->sighand->siglock);
2539 old_set = current->blocked.sig[0];
2540
2541 error = 0;
2542 switch (how) {
2543 default:
2544 error = -EINVAL;
2545 break;
2546 case SIG_BLOCK:
2547 sigaddsetmask(&current->blocked, new_set);
2548 break;
2549 case SIG_UNBLOCK:
2550 sigdelsetmask(&current->blocked, new_set);
2551 break;
2552 case SIG_SETMASK:
2553 current->blocked.sig[0] = new_set;
2554 break;
2555 }
2556
2557 recalc_sigpending();
2558 spin_unlock_irq(&current->sighand->siglock);
2559 if (error)
2560 goto out;
2561 if (oset)
2562 goto set_old;
2563 } else if (oset) {
2564 old_set = current->blocked.sig[0];
2565 set_old:
2566 error = -EFAULT;
2567 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2568 goto out;
2569 }
2570 error = 0;
2571out:
2572 return error;
2573}
2574#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2575
2576#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2577asmlinkage long
2578sys_rt_sigaction(int sig,
2579 const struct sigaction __user *act,
2580 struct sigaction __user *oact,
2581 size_t sigsetsize)
2582{
2583 struct k_sigaction new_sa, old_sa;
2584 int ret = -EINVAL;
2585
2586 /* XXX: Don't preclude handling different sized sigset_t's. */
2587 if (sigsetsize != sizeof(sigset_t))
2588 goto out;
2589
2590 if (act) {
2591 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2592 return -EFAULT;
2593 }
2594
2595 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2596
2597 if (!ret && oact) {
2598 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2599 return -EFAULT;
2600 }
2601out:
2602 return ret;
2603}
2604#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2605
2606#ifdef __ARCH_WANT_SYS_SGETMASK
2607
2608/*
2609 * For backwards compatibility. Functionality superseded by sigprocmask.
2610 */
2611asmlinkage long
2612sys_sgetmask(void)
2613{
2614 /* SMP safe */
2615 return current->blocked.sig[0];
2616}
2617
2618asmlinkage long
2619sys_ssetmask(int newmask)
2620{
2621 int old;
2622
2623 spin_lock_irq(&current->sighand->siglock);
2624 old = current->blocked.sig[0];
2625
2626 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2627 sigmask(SIGSTOP)));
2628 recalc_sigpending();
2629 spin_unlock_irq(&current->sighand->siglock);
2630
2631 return old;
2632}
2633#endif /* __ARCH_WANT_SGETMASK */
2634
2635#ifdef __ARCH_WANT_SYS_SIGNAL
2636/*
2637 * For backwards compatibility. Functionality superseded by sigaction.
2638 */
2639asmlinkage unsigned long
2640sys_signal(int sig, __sighandler_t handler)
2641{
2642 struct k_sigaction new_sa, old_sa;
2643 int ret;
2644
2645 new_sa.sa.sa_handler = handler;
2646 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2647
2648 ret = do_sigaction(sig, &new_sa, &old_sa);
2649
2650 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2651}
2652#endif /* __ARCH_WANT_SYS_SIGNAL */
2653
2654#ifdef __ARCH_WANT_SYS_PAUSE
2655
2656asmlinkage long
2657sys_pause(void)
2658{
2659 current->state = TASK_INTERRUPTIBLE;
2660 schedule();
2661 return -ERESTARTNOHAND;
2662}
2663
2664#endif
2665
2666void __init signals_init(void)
2667{
2668 sigqueue_cachep =
2669 kmem_cache_create("sigqueue",
2670 sizeof(struct sigqueue),
2671 __alignof__(struct sigqueue),
2672 SLAB_PANIC, NULL, NULL);
2673}
This page took 0.13635 seconds and 5 git commands to generate.