/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

DEFINE_TRACE(sched_signal_send);
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
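/*
 * Worked example (illustrative, not from the original source): with
 * _NSIG_WORDS == 1, a pending set of {SIGINT, SIGTERM} and a blocked
 * set of {SIGINT}, the single-word case computes
 *
 *	ready = (sigmask(SIGINT) | sigmask(SIGTERM)) &~ sigmask(SIGINT)
 *	      = sigmask(SIGTERM)
 *
 * which is nonzero, so the task has a deliverable signal pending.
 */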
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
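/*
 * Worked example (illustrative): on a 64-bit kernel (_NSIG_WORDS == 1),
 * if only SIGTERM (15) is pending and unblocked, then in word 0
 *
 *	x = *s &~ *m		has bit 14 set, and
 *	sig = ffz(~x) + 1	= 14 + 1 = 15
 *
 * The word is scanned lowest-bit-first, so the lowest-numbered
 * deliverable signal is returned first.
 */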
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * We won't get problems with the target's UID changing under us
	 * because changing it requires RCU be used, and if t != current, the
	 * caller must be holding the RCU readlock (by way of a spinlock) and
	 * we use RCU protection here
	 */
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
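/*
 * Usage sketch (illustrative only -- the device structure, field and
 * callback below are made up, not part of this file): a driver that
 * wants to defer signal handling around a critical window could do
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->signals_ok;		(nonzero: deliver after all)
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	...critical window...
 *	unblock_all_signals();
 */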
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
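/*
 * Typical caller pattern (illustrative): the architecture signal-
 * delivery code dequeues with the siglock held, as required above:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */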
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold at least the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred = current_cred(), *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	tcred = __task_cred(t);
	if ((cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	return __send_signal(sig, info, t, group, 0);
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 * - the caller must hold the RCU read lock at least
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if ((info == SEND_SIG_NOINFO ||
	     (!is_si_special(info) && SI_FROMUSER(info))) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __send_signal(sig, info, p, 1, 0);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
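/*
 * Summary of the pid encodings handled above, mirroring kill(2):
 *
 *	pid >  0	signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid <  -1	signal every process in the process group -pid
 *	pid == -1	signal every process the caller may signal, except
 *			init (vpid 1) and the caller's own thread group
 */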
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
*pid
, int sig
, int priv
)
1267 read_lock(&tasklist_lock
);
1268 ret
= __kill_pgrp_info(sig
, __si_special(priv
), pid
);
1269 read_unlock(&tasklist_lock
);
1273 EXPORT_SYMBOL(kill_pgrp
);
1275 int kill_pid(struct pid
*pid
, int sig
, int priv
)
1277 return kill_pid_info(sig
, __si_special(priv
), pid
);
1279 EXPORT_SYMBOL(kill_pid
);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
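/*
 * Illustrative pattern (simplified from the POSIX timer code): the
 * sigqueue is preallocated at timer_create() time, so that a later
 * expiration cannot fail for lack of memory:
 *
 *	new_timer->sigq = sigqueue_alloc();
 *	if (unlikely(!new_timer->sigq))
 *		return -EAGAIN;
 *	...
 *	send_sigqueue(timer->sigq, target, group);
 */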
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is to switch its nsproxy with
	 * sys_unshare(), but unsharing pid namespaces is not allowed,
	 * so we'll always see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() choose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
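/*
 * Example (illustrative): a kernel thread that must not be disturbed
 * by SIGINT can block it through this interface:
 *
 *	sigset_t blocked;
 *
 *	siginitset(&blocked, sigmask(SIGINT));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 *
 * Unlike the syscall below, this would just as happily block SIGKILL.
 */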
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
, const sigset_t __user
*, uthese
,
2175 siginfo_t __user
*, uinfo
, const struct timespec __user
*, uts
,
2184 /* XXX: Don't preclude handling different sized sigset_t's. */
2185 if (sigsetsize
!= sizeof(sigset_t
))
2188 if (copy_from_user(&these
, uthese
, sizeof(these
)))
2192 * Invert the set of allowed signals to get those we
2195 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2199 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
2201 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
2206 spin_lock_irq(¤t
->sighand
->siglock
);
2207 sig
= dequeue_signal(current
, &these
, &info
);
2209 timeout
= MAX_SCHEDULE_TIMEOUT
;
2211 timeout
= (timespec_to_jiffies(&ts
)
2212 + (ts
.tv_sec
|| ts
.tv_nsec
));
2215 /* None ready -- temporarily unblock those we're
2216 * interested while we are sleeping in so that we'll
2217 * be awakened when they arrive. */
2218 current
->real_blocked
= current
->blocked
;
2219 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
2220 recalc_sigpending();
2221 spin_unlock_irq(¤t
->sighand
->siglock
);
2223 timeout
= schedule_timeout_interruptible(timeout
);
2225 spin_lock_irq(¤t
->sighand
->siglock
);
2226 sig
= dequeue_signal(current
, &these
, &info
);
2227 current
->blocked
= current
->real_blocked
;
2228 siginitset(¤t
->real_blocked
, 0);
2229 recalc_sigpending();
2232 spin_unlock_irq(¤t
->sighand
->siglock
);
2237 if (copy_siginfo_to_user(uinfo
, &info
))
2249 SYSCALL_DEFINE2(kill
, pid_t
, pid
, int, sig
)
2251 struct siginfo info
;
2253 info
.si_signo
= sig
;
2255 info
.si_code
= SI_USER
;
2256 info
.si_pid
= task_tgid_vnr(current
);
2257 info
.si_uid
= current_uid();
2259 return kill_something_info(sig
, &info
, pid
);
2262 static int do_tkill(pid_t tgid
, pid_t pid
, int sig
)
2265 struct siginfo info
;
2266 struct task_struct
*p
;
2267 unsigned long flags
;
2270 info
.si_signo
= sig
;
2272 info
.si_code
= SI_TKILL
;
2273 info
.si_pid
= task_tgid_vnr(current
);
2274 info
.si_uid
= current_uid();
2277 p
= find_task_by_vpid(pid
);
2278 if (p
&& (tgid
<= 0 || task_tgid_vnr(p
) == tgid
)) {
2279 error
= check_kill_permission(sig
, &info
, p
);
2281 * The null signal is a permissions and process existence
2282 * probe. No signal is actually delivered.
2284 * If lock_task_sighand() fails we pretend the task dies
2285 * after receiving the signal. The window is tiny, and the
2286 * signal is private anyway.
2288 if (!error
&& sig
&& lock_task_sighand(p
, &flags
)) {
2289 error
= specific_send_sig_info(sig
, &info
, p
);
2290 unlock_task_sighand(p
, &flags
);
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
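/*
 * Userspace view (illustrative): glibc of this era has no tgkill()
 * wrapper, so callers typically go through syscall(2) to signal one
 * thread of a multi-threaded process:
 *
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */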
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
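/*
 * Userspace view (illustrative): sigqueue(3) is the usual entry point.
 * glibc fills in si_code = SI_QUEUE (which is negative, so it passes
 * the check above) and the caller only supplies the payload:
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, v);
 */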
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask. */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
2606 SYSCALL_DEFINE2(rt_sigsuspend
, sigset_t __user
*, unewset
, size_t, sigsetsize
)
2610 /* XXX: Don't preclude handling different sized sigset_t's. */
2611 if (sigsetsize
!= sizeof(sigset_t
))
2614 if (copy_from_user(&newset
, unewset
, sizeof(newset
)))
2616 sigdelsetmask(&newset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2618 spin_lock_irq(¤t
->sighand
->siglock
);
2619 current
->saved_sigmask
= current
->blocked
;
2620 current
->blocked
= newset
;
2621 recalc_sigpending();
2622 spin_unlock_irq(¤t
->sighand
->siglock
);
2624 current
->state
= TASK_INTERRUPTIBLE
;
2626 set_restore_sigmask();
2627 return -ERESTARTNOHAND
;
2629 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}