/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/events/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
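/*
 * Illustrative example (editor's note, not in the original source):
 * with _NSIG_WORDS == 1 and 64-bit words, a pending SIGINT (bit 1,
 * since bit (sig - 1) represents signal sig) and a blocked mask that
 * covers only SIGTERM (bit 14) give
 *
 *	ready = signal->sig[0] & ~blocked->sig[0]
 *	      = 0x0000000000000002 & ~0x0000000000004000
 *	      = 0x0000000000000002,
 *
 * which is nonzero, so PENDING() reports a deliverable signal.
 */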
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
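/*
 * Illustrative example (editor's note, not in the original source):
 * assume _NSIG_BPW == 64 and a pending, unmasked SIGTERM (15). Then
 * x has bit 14 set, ffz(~x) returns the index of the lowest set bit
 * (14), and next_signal() yields 14 + 0*_NSIG_BPW + 1 == 15.
 */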
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * We won't get problems with the target's UID changing under us
	 * because changing it requires RCU be used, and if t != current, the
	 * caller must be holding the RCU readlock (by way of a spinlock) and
	 * we use RCU protection here
	 */
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
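/*
 * Usage sketch (editor's note, not in the original source): callers
 * drain deliverable signals with the siglock held, roughly:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * See get_signal_to_deliver() below for the real in-tree caller.
 */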
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold at least the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred = current_cred(), *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	tcred = __task_cred(t);
	if ((cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
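/*
 * Illustrative example (editor's note, not in the original source):
 * if SIGUSR1 (< SIGRTMIN) is already pending, legacy_queue() returns
 * true and a second SIGUSR1 is silently coalesced with the first.
 * A realtime signal such as SIGRTMIN+1 never takes this path, so
 * each instance is queued separately.
 */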
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	if (!is_si_special(info) && SI_FROMUSER(info) &&
			task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
		from_ancestor_ns = 1;
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
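/*
 * Usage sketch (editor's note, not in the original source): fault
 * handlers use this to deliver an unignorable trap signal, e.g.:
 *
 *	struct siginfo info;
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;  // hypothetical
 *	force_sig_info(SIGSEGV, &info, current);
 *
 * "fault_address" stands in for whatever address the architecture's
 * fault handler has on hand.
 */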
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 * - the caller must hold the RCU read lock at least
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = check_kill_permission(sig, info, p);

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if ((info == SEND_SIG_NOINFO ||
	     (!is_si_special(info) && SI_FROMUSER(info))) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __send_signal(sig, info, p, 1, 0);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
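/*
 * Usage sketch (editor's note, not in the original source): a driver
 * holding a struct pid reference can signal the owning process or its
 * process group without touching raw pid_t values:
 *
 *	kill_pid(file_pid, SIGIO, 1);             // priv: from kernel
 *	kill_pgrp(task_pgrp(current), SIGHUP, 1);
 *
 * "file_pid" is a hypothetical struct pid * the driver obtained
 * earlier, e.g. via get_pid(task_pid(current)).
 */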
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
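/*
 * Lifecycle sketch (editor's note, not in the original source): the
 * POSIX timer code preallocates at timer_create() time and reuses the
 * same entry on every expiry, roughly:
 *
 *	q = sigqueue_alloc();           // at timer_create(); NULL -> EAGAIN
 *	...
 *	q->info.si_signo = ...;         // fill in before each send
 *	send_sigqueue(q, task, group);  // at expiry; cannot fail with -EAGAIN
 *	...
 *	sigqueue_free(q);               // at timer_delete()
 */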
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int notify;

	if (!sig->group_stop_count) {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
	}
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop, report
	 * to the parent.  When ptraced, every thread reports itself.
	 */
	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	/*
	 * tracehook_notify_jctl() can drop and reacquire siglock, so
	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
	 * or SIGKILL comes in between, ->group_stop_count becomes 0.
	 */
	if (sig->group_stop_count) {
		if (!--sig->group_stop_count)
			sig->flags = SIGNAL_STOP_STOPPED;
		current->exit_code = sig->group_exit_code;
		__set_current_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}

	/* Now we don't run again until woken by SIGCONT or SIGKILL */
	do {
		schedule();
	} while (try_to_freeze());

	tracehook_finish_jctl();
	current->exit_code = 0;

	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;

		why = tracehook_notify_jctl(why, CLD_CONTINUED);
		spin_unlock_irq(&sighand->siglock);

		if (why) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current->group_leader, why);
			read_unlock(&tasklist_lock);
		}
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
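/*
 * Usage sketch (editor's note, not in the original source): a kernel
 * thread that wants to see a SIGKILL-style shutdown request can open
 * up its mask with this interface:
 *
 *	sigset_t unblocked;
 *	siginitset(&unblocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_UNBLOCK, &unblocked, NULL);
 *
 * Unlike the user-mode syscall, nothing is filtered here, so the
 * caller is trusted with "unblockable" signals.
 */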
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
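/*
 * Illustrative pid semantics (editor's note, not in the original
 * source), as resolved by kill_something_info() above:
 *
 *	kill(1234, SIGTERM);   // pid > 0:   exactly one process
 *	kill(0, SIGTERM);      // pid == 0:  caller's process group
 *	kill(-5678, SIGTERM);  // pid < -1:  process group 5678
 *	kill(-1, SIGTERM);     // pid == -1: broadcast (see comment above)
 */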
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
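/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * there is no libc wrapper for tgkill in this era, so threading code
 * reaches it through syscall(2):
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */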
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
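/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * sigqueue(3) is the usual caller; glibc fills si_code with SI_QUEUE,
 * which is negative and therefore passes the si_code >= 0 check above:
 *
 *	union sigval val;
 *
 *	val.sival_int = 42;
 *	if (sigqueue(pid, SIGUSR1, val) < 0)
 *		perror("sigqueue");
 */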
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info->si_code >= 0)
		return -EPERM;
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
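/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * the POSIX discard rule implemented above is observable from userspace --
 * a pending, blocked signal vanishes once its action becomes SIG_IGN:
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// now pending (blocked)
 *	signal(SIGUSR1, SIG_IGN);	// pending instance is discarded
 *	sigpending(&pend);		// sigismember(&pend, SIGUSR1) == 0
 */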
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
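/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * an alternate stack only takes effect for handlers installed with
 * SA_ONSTACK, the classic use being a SIGSEGV handler that survives
 * stack overflow:
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *		       .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *	struct sigaction sa;
 *
 *	sigaltstack(&ss, NULL);
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = segv_handler;		// hypothetical handler
 *	sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
 *	sigaction(SIGSEGV, &sa, NULL);
 */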
#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask. */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
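/*
 * Illustrative note (an addition, not in the original file): this legacy
 * entry point manipulates only word 0 of the blocked set, so it cannot
 * affect real-time signals. Modern userspace reaches rt_sigprocmask()
 * instead via the libc wrapper:
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// glibc issues rt_sigprocmask
 */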
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
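/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * SA_ONESHOT | SA_NOMASK above gives signal(2) historical System V
 * semantics -- the disposition resets to SIG_DFL on delivery -- so old
 * code re-installs its handler from within the handler:
 *
 *	void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);	// re-arm; inherently racy
 *	}
 */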
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
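/*
 * Illustrative note (an addition, not in the original file): with
 * -ERESTARTNOHAND the syscall is transparently restarted unless a handler
 * actually ran, which matches pause(2)'s contract of returning only after
 * a caught signal:
 *
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	while (pause() == -1 && errno == EINTR)
 *		handle_wakeup();	// hypothetical; pause() only returns -1
 */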
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
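/*
 * Illustrative userspace sketch (an addition, not in the original file):
 * the mask swap plus TASK_INTERRUPTIBLE above is what lets sigsuspend(2)
 * close the classic unblock-then-sleep race:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_signal)		// flag set by the SIGUSR1 handler
 *		sigsuspend(&old);	// atomically restore mask and sleep
 */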
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}