Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/signal.c | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | * | |
6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | |
7 | * | |
8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. | |
9 | * Changes to use preallocated sigqueue structures | |
10 | * to allow signals to be sent reliably. | |
11 | */ | |
12 | ||
1da177e4 LT |
13 | #include <linux/slab.h> |
14 | #include <linux/module.h> | |
15 | #include <linux/smp_lock.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/sched.h> | |
18 | #include <linux/fs.h> | |
19 | #include <linux/tty.h> | |
20 | #include <linux/binfmts.h> | |
21 | #include <linux/security.h> | |
22 | #include <linux/syscalls.h> | |
23 | #include <linux/ptrace.h> | |
7ed20e1a | 24 | #include <linux/signal.h> |
c59ede7b | 25 | #include <linux/capability.h> |
7dfb7103 | 26 | #include <linux/freezer.h> |
1da177e4 LT |
27 | #include <asm/param.h> |
28 | #include <asm/uaccess.h> | |
29 | #include <asm/unistd.h> | |
30 | #include <asm/siginfo.h> | |
e1396065 | 31 | #include "audit.h" /* audit_signal_info() */ |
1da177e4 LT |
32 | |
33 | /* | |
34 | * SLAB caches for signal bits. | |
35 | */ | |
36 | ||
e18b890b | 37 | static struct kmem_cache *sigqueue_cachep; |
1da177e4 LT |
38 | |
39 | /* | |
40 | * In POSIX a signal is sent either to a specific thread (Linux task) | |
41 | * or to the process as a whole (Linux thread group). How the signal | |
42 | * is sent determines whether it's to one thread or the whole group, | |
43 | * which determines which signal mask(s) are involved in blocking it | |
44 | * from being delivered until later. When the signal is delivered, | |
45 | * either it's caught or ignored by a user handler or it has a default | |
46 | * effect that applies to the whole thread group (POSIX process). | |
47 | * | |
48 | * The possible effects an unblocked signal set to SIG_DFL can have are: | |
49 | * ignore - Nothing Happens | |
50 | * terminate - kill the process, i.e. all threads in the group, | |
51 | * similar to exit_group. The group leader (only) reports | |
52 | * WIFSIGNALED status to its parent. | |
53 | * coredump - write a core dump file describing all threads using | |
54 | * the same mm and then kill all those threads | |
55 | * stop - stop all the threads in the group, i.e. TASK_STOPPED state | |
56 | * | |
57 | * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. | |
58 | * Other signals when not blocked and set to SIG_DFL behave as follows. | |
59 | * The job control signals also have other special effects. | |
60 | * | |
61 | * +--------------------+------------------+ | |
62 | * | POSIX signal | default action | | |
63 | * +--------------------+------------------+ | |
64 | * | SIGHUP | terminate | | |
65 | * | SIGINT | terminate | | |
66 | * | SIGQUIT | coredump | | |
67 | * | SIGILL | coredump | | |
68 | * | SIGTRAP | coredump | | |
69 | * | SIGABRT/SIGIOT | coredump | | |
70 | * | SIGBUS | coredump | | |
71 | * | SIGFPE | coredump | | |
72 | * | SIGKILL | terminate(+) | | |
73 | * | SIGUSR1 | terminate | | |
74 | * | SIGSEGV | coredump | | |
75 | * | SIGUSR2 | terminate | | |
76 | * | SIGPIPE | terminate | | |
77 | * | SIGALRM | terminate | | |
78 | * | SIGTERM | terminate | | |
79 | * | SIGCHLD | ignore | | |
80 | * | SIGCONT | ignore(*) | | |
81 | * | SIGSTOP | stop(*)(+) | | |
82 | * | SIGTSTP | stop(*) | | |
83 | * | SIGTTIN | stop(*) | | |
84 | * | SIGTTOU | stop(*) | | |
85 | * | SIGURG | ignore | | |
86 | * | SIGXCPU | coredump | | |
87 | * | SIGXFSZ | coredump | | |
88 | * | SIGVTALRM | terminate | | |
89 | * | SIGPROF | terminate | | |
90 | * | SIGPOLL/SIGIO | terminate | | |
91 | * | SIGSYS/SIGUNUSED | coredump | | |
92 | * | SIGSTKFLT | terminate | | |
93 | * | SIGWINCH | ignore | | |
94 | * | SIGPWR | terminate | | |
95 | * | SIGRTMIN-SIGRTMAX | terminate | | |
96 | * +--------------------+------------------+ | |
97 | * | non-POSIX signal | default action | | |
98 | * +--------------------+------------------+ | |
99 | * | SIGEMT | coredump | | |
100 | * +--------------------+------------------+ | |
101 | * | |
102 | * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". | |
103 | * (*) Special job control effects: | |
104 | * When SIGCONT is sent, it resumes the process (all threads in the group) | |
105 | * from TASK_STOPPED state and also clears any pending/queued stop signals | |
106 | * (any of those marked with "stop(*)"). This happens regardless of blocking, | |
107 | * catching, or ignoring SIGCONT. When any stop signal is sent, it clears | |
108 | * any pending/queued SIGCONT signals; this happens regardless of blocking, | |
109 | * catching, or ignoring the stop signal, though (except for SIGSTOP) the | |
110 | * default action of stopping the process may happen later or never. | |
111 | */ | |
112 | ||
113 | #ifdef SIGEMT | |
114 | #define M_SIGEMT M(SIGEMT) | |
115 | #else | |
116 | #define M_SIGEMT 0 | |
117 | #endif | |
118 | ||
119 | #if SIGRTMIN > BITS_PER_LONG | |
120 | #define M(sig) (1ULL << ((sig)-1)) | |
121 | #else | |
122 | #define M(sig) (1UL << ((sig)-1)) | |
123 | #endif | |
124 | #define T(sig, mask) (M(sig) & (mask)) | |
125 | ||
126 | #define SIG_KERNEL_ONLY_MASK (\ | |
127 | M(SIGKILL) | M(SIGSTOP) ) | |
128 | ||
129 | #define SIG_KERNEL_STOP_MASK (\ | |
130 | M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) | |
131 | ||
132 | #define SIG_KERNEL_COREDUMP_MASK (\ | |
133 | M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ | |
134 | M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ | |
135 | M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) | |
136 | ||
137 | #define SIG_KERNEL_IGNORE_MASK (\ | |
138 | M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) | |
139 | ||
140 | #define sig_kernel_only(sig) \ | |
141 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) | |
142 | #define sig_kernel_coredump(sig) \ | |
143 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) | |
144 | #define sig_kernel_ignore(sig) \ | |
145 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) | |
146 | #define sig_kernel_stop(sig) \ | |
147 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) | |
148 | ||
6108ccd3 | 149 | #define sig_needs_tasklist(sig) ((sig) == SIGCONT) |
a9e88e84 | 150 | |
1da177e4 LT |
151 | #define sig_user_defined(t, signr) \ |
152 | (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ | |
153 | ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) | |
154 | ||
155 | #define sig_fatal(t, signr) \ | |
156 | (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ | |
157 | (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) | |
158 | ||
159 | static int sig_ignored(struct task_struct *t, int sig) | |
160 | { | |
161 | void __user * handler; | |
162 | ||
163 | /* | |
164 | * Tracers always want to know about signals.. | |
165 | */ | |
166 | if (t->ptrace & PT_PTRACED) | |
167 | return 0; | |
168 | ||
169 | /* | |
170 | * Blocked signals are never ignored, since the | |
171 | * signal handler may change by the time it is | |
172 | * unblocked. | |
173 | */ | |
174 | if (sigismember(&t->blocked, sig)) | |
175 | return 0; | |
176 | ||
177 | /* Is it explicitly or implicitly ignored? */ | |
178 | handler = t->sighand->action[sig-1].sa.sa_handler; | |
179 | return handler == SIG_IGN || | |
180 | (handler == SIG_DFL && sig_kernel_ignore(sig)); | |
181 | } | |
182 | ||
183 | /* | |
184 | * Re-calculate pending state from the set of locally pending | |
185 | * signals, globally pending signals, and blocked signals. | |
186 | */ | |
187 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) | |
188 | { | |
189 | unsigned long ready; | |
190 | long i; | |
191 | ||
192 | switch (_NSIG_WORDS) { | |
193 | default: | |
194 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) | |
195 | ready |= signal->sig[i] &~ blocked->sig[i]; | |
196 | break; | |
197 | ||
198 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; | |
199 | ready |= signal->sig[2] &~ blocked->sig[2]; | |
200 | ready |= signal->sig[1] &~ blocked->sig[1]; | |
201 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
202 | break; | |
203 | ||
204 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; | |
205 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
206 | break; | |
207 | ||
208 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; | |
209 | } | |
210 | return ready != 0; | |
211 | } | |
212 | ||
213 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) | |
214 | ||
215 | fastcall void recalc_sigpending_tsk(struct task_struct *t) | |
216 | { | |
217 | if (t->signal->group_stop_count > 0 || | |
3e1d1d28 | 218 | (freezing(t)) || |
1da177e4 LT |
219 | PENDING(&t->pending, &t->blocked) || |
220 | PENDING(&t->signal->shared_pending, &t->blocked)) | |
221 | set_tsk_thread_flag(t, TIF_SIGPENDING); | |
222 | else | |
223 | clear_tsk_thread_flag(t, TIF_SIGPENDING); | |
224 | } | |
225 | ||
226 | void recalc_sigpending(void) | |
227 | { | |
228 | recalc_sigpending_tsk(current); | |
229 | } | |
230 | ||
231 | /* Given the mask, find the first available signal that should be serviced. */ | |
232 | ||
233 | static int | |
234 | next_signal(struct sigpending *pending, sigset_t *mask) | |
235 | { | |
236 | unsigned long i, *s, *m, x; | |
237 | int sig = 0; | |
238 | ||
239 | s = pending->signal.sig; | |
240 | m = mask->sig; | |
241 | switch (_NSIG_WORDS) { | |
242 | default: | |
243 | for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) | |
244 | if ((x = *s &~ *m) != 0) { | |
245 | sig = ffz(~x) + i*_NSIG_BPW + 1; | |
246 | break; | |
247 | } | |
248 | break; | |
249 | ||
250 | case 2: if ((x = s[0] &~ m[0]) != 0) | |
251 | sig = 1; | |
252 | else if ((x = s[1] &~ m[1]) != 0) | |
253 | sig = _NSIG_BPW + 1; | |
254 | else | |
255 | break; | |
256 | sig += ffz(~x); | |
257 | break; | |
258 | ||
259 | case 1: if ((x = *s &~ *m) != 0) | |
260 | sig = ffz(~x) + 1; | |
261 | break; | |
262 | } | |
263 | ||
264 | return sig; | |
265 | } | |
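next_signal() scans the pending words from the low bits up, so when several classic signals are pending the lowest-numbered one is reported first. That ordering is a detail of this implementation, not a POSIX guarantee; a user-space sketch that makes it visible:

```c
/*
 * Raise SIGUSR2 then SIGUSR1 while both are blocked; sigwait()
 * hands back SIGUSR1 first because it has the lower number.
 * Linux implementation detail, not a POSIX guarantee.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGUSR2);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR2);			/* pending, delivery blocked */
	raise(SIGUSR1);

	sigwait(&set, &sig);
	printf("first:  %d (SIGUSR1=%d)\n", sig, SIGUSR1);
	sigwait(&set, &sig);
	printf("second: %d (SIGUSR2=%d)\n", sig, SIGUSR2);
	return 0;
}
```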
266 | ||
dd0fc66f | 267 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, |
1da177e4 LT |
268 | int override_rlimit) |
269 | { | |
270 | struct sigqueue *q = NULL; | |
10b1fbdb | 271 | struct user_struct *user; |
1da177e4 | 272 | |
10b1fbdb LT |
273 | /* |
274 | * In order to avoid problems with "switch_user()", we want to make | |
275 | * sure that the compiler doesn't re-load "t->user" | |
276 | */ | |
277 | user = t->user; | |
278 | barrier(); | |
279 | atomic_inc(&user->sigpending); | |
1da177e4 | 280 | if (override_rlimit || |
10b1fbdb | 281 | atomic_read(&user->sigpending) <= |
1da177e4 LT |
282 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) |
283 | q = kmem_cache_alloc(sigqueue_cachep, flags); | |
284 | if (unlikely(q == NULL)) { | |
10b1fbdb | 285 | atomic_dec(&user->sigpending); |
1da177e4 LT |
286 | } else { |
287 | INIT_LIST_HEAD(&q->list); | |
288 | q->flags = 0; | |
10b1fbdb | 289 | q->user = get_uid(user); |
1da177e4 LT |
290 | } |
291 | return(q); | |
292 | } | |
293 | ||
514a01b8 | 294 | static void __sigqueue_free(struct sigqueue *q) |
1da177e4 LT |
295 | { |
296 | if (q->flags & SIGQUEUE_PREALLOC) | |
297 | return; | |
298 | atomic_dec(&q->user->sigpending); | |
299 | free_uid(q->user); | |
300 | kmem_cache_free(sigqueue_cachep, q); | |
301 | } | |
302 | ||
6a14c5c9 | 303 | void flush_sigqueue(struct sigpending *queue) |
1da177e4 LT |
304 | { |
305 | struct sigqueue *q; | |
306 | ||
307 | sigemptyset(&queue->signal); | |
308 | while (!list_empty(&queue->list)) { | |
309 | q = list_entry(queue->list.next, struct sigqueue , list); | |
310 | list_del_init(&q->list); | |
311 | __sigqueue_free(q); | |
312 | } | |
313 | } | |
314 | ||
315 | /* | |
316 | * Flush all pending signals for a task. | |
317 | */ | |
c81addc9 | 318 | void flush_signals(struct task_struct *t) |
1da177e4 LT |
319 | { |
320 | unsigned long flags; | |
321 | ||
322 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
323 | clear_tsk_thread_flag(t,TIF_SIGPENDING); | |
324 | flush_sigqueue(&t->pending); | |
325 | flush_sigqueue(&t->signal->shared_pending); | |
326 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | |
327 | } | |
328 | ||
1da177e4 LT |
329 | /* |
330 | * Flush all handlers for a task. | |
331 | */ | |
332 | ||
333 | void | |
334 | flush_signal_handlers(struct task_struct *t, int force_default) | |
335 | { | |
336 | int i; | |
337 | struct k_sigaction *ka = &t->sighand->action[0]; | |
338 | for (i = _NSIG ; i != 0 ; i--) { | |
339 | if (force_default || ka->sa.sa_handler != SIG_IGN) | |
340 | ka->sa.sa_handler = SIG_DFL; | |
341 | ka->sa.sa_flags = 0; | |
342 | sigemptyset(&ka->sa.sa_mask); | |
343 | ka++; | |
344 | } | |
345 | } | |
346 | ||
347 | ||
348 | /* Notify the system that a driver wants to block all signals for this | |
349 | * process, and wants to be notified if any signals at all were to be | |
350 | * sent/acted upon. If the notifier routine returns non-zero, then the | |
351 | * signal will be acted upon after all. If the notifier routine returns 0, | |
352 | * then the signal will be blocked. Only one block per process is | |
353 | * allowed. priv is a pointer to private data that the notifier routine | |
354 | * can use to determine if the signal should be blocked or not. */ | |
355 | ||
356 | void | |
357 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) | |
358 | { | |
359 | unsigned long flags; | |
360 | ||
361 | spin_lock_irqsave(¤t->sighand->siglock, flags); | |
362 | current->notifier_mask = mask; | |
363 | current->notifier_data = priv; | |
364 | current->notifier = notifier; | |
365 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | |
366 | } | |
367 | ||
368 | /* Notify the system that blocking has ended. */ | |
369 | ||
370 | void | |
371 | unblock_all_signals(void) | |
372 | { | |
373 | unsigned long flags; | |
374 | ||
375 | spin_lock_irqsave(¤t->sighand->siglock, flags); | |
376 | current->notifier = NULL; | |
377 | current->notifier_data = NULL; | |
378 | recalc_sigpending(); | |
379 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | |
380 | } | |
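A hedged sketch of how an in-kernel caller might use this notifier pair; `struct drv_state` and the `drv_*` names are made up for illustration:

```c
/*
 * Hypothetical driver using the notifier API above: while the
 * hardware lock is held, the notifier returns 0 and signals are
 * held off. struct drv_state and the drv_* names are illustrative.
 */
struct drv_state {
	int hw_lock_held;
};

static int drv_notifier(void *priv)
{
	struct drv_state *s = priv;

	return !s->hw_lock_held;	/* 0: block the signal for now */
}

static void drv_enter_critical(struct drv_state *s, sigset_t *mask)
{
	s->hw_lock_held = 1;
	block_all_signals(drv_notifier, s, mask);
}

static void drv_leave_critical(struct drv_state *s)
{
	s->hw_lock_held = 0;
	unblock_all_signals();
}
```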
381 | ||
858119e1 | 382 | static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) |
1da177e4 LT |
383 | { |
384 | struct sigqueue *q, *first = NULL; | |
385 | int still_pending = 0; | |
386 | ||
387 | if (unlikely(!sigismember(&list->signal, sig))) | |
388 | return 0; | |
389 | ||
390 | /* | |
391 | * Collect the siginfo appropriate to this signal. Check if | |
392 | * there is another siginfo for the same signal. | |
393 | */ | |
394 | list_for_each_entry(q, &list->list, list) { | |
395 | if (q->info.si_signo == sig) { | |
396 | if (first) { | |
397 | still_pending = 1; | |
398 | break; | |
399 | } | |
400 | first = q; | |
401 | } | |
402 | } | |
403 | if (first) { | |
404 | list_del_init(&first->list); | |
405 | copy_siginfo(info, &first->info); | |
406 | __sigqueue_free(first); | |
407 | if (!still_pending) | |
408 | sigdelset(&list->signal, sig); | |
409 | } else { | |
410 | ||
411 | /* Ok, it wasn't in the queue. This must be | |
412 | a fast-pathed signal or we must have been | |
413 | out of queue space. So zero out the info. | |
414 | */ | |
415 | sigdelset(&list->signal, sig); | |
416 | info->si_signo = sig; | |
417 | info->si_errno = 0; | |
418 | info->si_code = 0; | |
419 | info->si_pid = 0; | |
420 | info->si_uid = 0; | |
421 | } | |
422 | return 1; | |
423 | } | |
424 | ||
425 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |
426 | siginfo_t *info) | |
427 | { | |
27d91e07 | 428 | int sig = next_signal(pending, mask); |
1da177e4 | 429 | |
1da177e4 LT |
430 | if (sig) { |
431 | if (current->notifier) { | |
432 | if (sigismember(current->notifier_mask, sig)) { | |
433 | if (!(current->notifier)(current->notifier_data)) { | |
434 | clear_thread_flag(TIF_SIGPENDING); | |
435 | return 0; | |
436 | } | |
437 | } | |
438 | } | |
439 | ||
440 | if (!collect_signal(sig, pending, info)) | |
441 | sig = 0; | |
1da177e4 | 442 | } |
1da177e4 LT |
443 | |
444 | return sig; | |
445 | } | |
446 | ||
447 | /* | |
448 | * Dequeue a signal and return the element to the caller, which is | |
449 | * expected to free it. | |
450 | * | |
451 | * All callers have to hold the siglock. | |
452 | */ | |
453 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |
454 | { | |
455 | int signr = __dequeue_signal(&tsk->pending, mask, info); | |
456 | if (!signr) | |
457 | signr = __dequeue_signal(&tsk->signal->shared_pending, | |
458 | mask, info); | |
27d91e07 | 459 | recalc_sigpending_tsk(tsk); |
1da177e4 LT |
460 | if (signr && unlikely(sig_kernel_stop(signr))) { |
461 | /* | |
462 | * Set a marker that we have dequeued a stop signal. Our | |
463 | * caller might release the siglock and then the pending | |
464 | * stop signal it is about to process is no longer in the | |
465 | * pending bitmasks, but must still be cleared by a SIGCONT | |
466 | * (and overruled by a SIGKILL). So those cases clear this | |
467 | * shared flag after we've set it. Note that this flag may | |
468 | * remain set after the signal we return is ignored or | |
469 | * handled. That doesn't matter because its only purpose | |
470 | * is to alert stop-signal processing code when another | |
471 | * processor has come along and cleared the flag. | |
472 | */ | |
788e05a6 ON |
473 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) |
474 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | |
1da177e4 LT |
475 | } |
476 | if ( signr && | |
477 | ((info->si_code & __SI_MASK) == __SI_TIMER) && | |
478 | info->si_sys_private){ | |
479 | /* | |
480 | * Release the siglock to ensure proper locking order | |
481 | * of timer locks outside of siglocks. Note, we leave | |
482 | * irqs disabled here, since the posix-timers code is | |
483 | * about to disable them again anyway. | |
484 | */ | |
485 | spin_unlock(&tsk->sighand->siglock); | |
486 | do_schedule_next_timer(info); | |
487 | spin_lock(&tsk->sighand->siglock); | |
488 | } | |
489 | return signr; | |
490 | } | |
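A minimal sketch of the locking contract stated above, loosely modelled on the architecture signal-delivery loop (the helper name is made up):

```c
/*
 * dequeue_signal() must run under ->siglock with interrupts off;
 * a simplified caller, loosely following the arch delivery loop.
 */
static int deliver_one_signal(struct task_struct *tsk, siginfo_t *info)
{
	int signr;

	spin_lock_irq(&tsk->sighand->siglock);
	signr = dequeue_signal(tsk, &tsk->blocked, info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return signr;		/* 0: nothing pending and deliverable */
}
```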
491 | ||
492 | /* | |
493 | * Tell a process that it has a new active signal.. | |
494 | * | |
495 | * NOTE! we rely on the previous spin_lock to | |
496 | * lock interrupts for us! We can only be called with | |
497 | * "siglock" held, and the local interrupt must | |
498 | * have been disabled when that got acquired! | |
499 | * | |
500 | * No need to set need_resched since signal event passing | |
501 | * goes through ->blocked | |
502 | */ | |
503 | void signal_wake_up(struct task_struct *t, int resume) | |
504 | { | |
505 | unsigned int mask; | |
506 | ||
507 | set_tsk_thread_flag(t, TIF_SIGPENDING); | |
508 | ||
509 | /* | |
510 | * For SIGKILL, we want to wake it up in the stopped/traced case. | |
511 | * We don't check t->state here because there is a race with it | |
512 | * executing on another processor and just now entering stopped state. | |
513 | * By using wake_up_state, we ensure the process will wake up and | |
514 | * handle its death signal. | |
515 | */ | |
516 | mask = TASK_INTERRUPTIBLE; | |
517 | if (resume) | |
518 | mask |= TASK_STOPPED | TASK_TRACED; | |
519 | if (!wake_up_state(t, mask)) | |
520 | kick_process(t); | |
521 | } | |
522 | ||
71fabd5e GA |
523 | /* |
524 | * Remove signals in mask from the pending set and queue. | |
525 | * Returns 1 if any signals were found. | |
526 | * | |
527 | * All callers must be holding the siglock. | |
528 | * | |
529 | * This version takes a sigset mask and looks at all signals, | |
530 | * not just those in the first mask word. | |
531 | */ | |
532 | static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) | |
533 | { | |
534 | struct sigqueue *q, *n; | |
535 | sigset_t m; | |
536 | ||
537 | sigandsets(&m, mask, &s->signal); | |
538 | if (sigisemptyset(&m)) | |
539 | return 0; | |
540 | ||
541 | signandsets(&s->signal, &s->signal, mask); | |
542 | list_for_each_entry_safe(q, n, &s->list, list) { | |
543 | if (sigismember(mask, q->info.si_signo)) { | |
544 | list_del_init(&q->list); | |
545 | __sigqueue_free(q); | |
546 | } | |
547 | } | |
548 | return 1; | |
549 | } | |
1da177e4 LT |
550 | /* |
551 | * Remove signals in mask from the pending set and queue. | |
552 | * Returns 1 if any signals were found. | |
553 | * | |
554 | * All callers must be holding the siglock. | |
555 | */ | |
556 | static int rm_from_queue(unsigned long mask, struct sigpending *s) | |
557 | { | |
558 | struct sigqueue *q, *n; | |
559 | ||
560 | if (!sigtestsetmask(&s->signal, mask)) | |
561 | return 0; | |
562 | ||
563 | sigdelsetmask(&s->signal, mask); | |
564 | list_for_each_entry_safe(q, n, &s->list, list) { | |
565 | if (q->info.si_signo < SIGRTMIN && | |
566 | (mask & sigmask(q->info.si_signo))) { | |
567 | list_del_init(&q->list); | |
568 | __sigqueue_free(q); | |
569 | } | |
570 | } | |
571 | return 1; | |
572 | } | |
573 | ||
574 | /* | |
575 | * Bad permissions for sending the signal | |
576 | */ | |
577 | static int check_kill_permission(int sig, struct siginfo *info, | |
578 | struct task_struct *t) | |
579 | { | |
580 | int error = -EINVAL; | |
7ed20e1a | 581 | if (!valid_signal(sig)) |
1da177e4 LT |
582 | return error; |
583 | error = -EPERM; | |
621d3121 | 584 | if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) |
1da177e4 LT |
585 | && ((sig != SIGCONT) || |
586 | (current->signal->session != t->signal->session)) | |
587 | && (current->euid ^ t->suid) && (current->euid ^ t->uid) | |
588 | && (current->uid ^ t->suid) && (current->uid ^ t->uid) | |
589 | && !capable(CAP_KILL)) | |
590 | return error; | |
c2f0c7c3 | 591 | |
8f95dc58 | 592 | error = security_task_kill(t, info, sig, 0); |
c2f0c7c3 SG |
593 | if (!error) |
594 | audit_signal_info(sig, t); /* Let audit system see the signal */ | |
595 | return error; | |
1da177e4 LT |
596 | } |
597 | ||
598 | /* forward decl */ | |
a1d5e21e | 599 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why); |
1da177e4 LT |
600 | |
601 | /* | |
602 | * Handle magic process-wide effects of stop/continue signals. | |
603 | * Unlike the signal actions, these happen immediately at signal-generation | |
604 | * time regardless of blocking, ignoring, or handling. This does the | |
605 | * actual continuing for SIGCONT, but not the actual stopping for stop | |
606 | * signals. The process stop is done as a signal action for SIG_DFL. | |
607 | */ | |
608 | static void handle_stop_signal(int sig, struct task_struct *p) | |
609 | { | |
610 | struct task_struct *t; | |
611 | ||
dd12f48d | 612 | if (p->signal->flags & SIGNAL_GROUP_EXIT) |
1da177e4 LT |
613 | /* |
614 | * The process is in the middle of dying already. | |
615 | */ | |
616 | return; | |
617 | ||
618 | if (sig_kernel_stop(sig)) { | |
619 | /* | |
620 | * This is a stop signal. Remove SIGCONT from all queues. | |
621 | */ | |
622 | rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); | |
623 | t = p; | |
624 | do { | |
625 | rm_from_queue(sigmask(SIGCONT), &t->pending); | |
626 | t = next_thread(t); | |
627 | } while (t != p); | |
628 | } else if (sig == SIGCONT) { | |
629 | /* | |
630 | * Remove all stop signals from all queues, | |
631 | * and wake all threads. | |
632 | */ | |
633 | if (unlikely(p->signal->group_stop_count > 0)) { | |
634 | /* | |
635 | * There was a group stop in progress. We'll | |
636 | * pretend it finished before we got here. We are | |
637 | * obliged to report it to the parent: if the | |
638 | * SIGSTOP happened "after" this SIGCONT, then it | |
639 | * would have cleared this pending SIGCONT. If it | |
640 | * happened "before" this SIGCONT, then the parent | |
641 | * got the SIGCHLD about the stop finishing before | |
642 | * the continue happened. We do the notification | |
643 | * now, and it's as if the stop had finished and | |
644 | * the SIGCHLD was pending on entry to this kill. | |
645 | */ | |
646 | p->signal->group_stop_count = 0; | |
647 | p->signal->flags = SIGNAL_STOP_CONTINUED; | |
648 | spin_unlock(&p->sighand->siglock); | |
a1d5e21e | 649 | do_notify_parent_cldstop(p, CLD_STOPPED); |
1da177e4 LT |
650 | spin_lock(&p->sighand->siglock); |
651 | } | |
652 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); | |
653 | t = p; | |
654 | do { | |
655 | unsigned int state; | |
656 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | |
657 | ||
658 | /* | |
659 | * If there is a handler for SIGCONT, we must make | |
660 | * sure that no thread returns to user mode before | |
661 | * we post the signal, in case it was the only | |
662 | * thread eligible to run the signal handler--then | |
663 | * it must not do anything between resuming and | |
664 | * running the handler. With the TIF_SIGPENDING | |
665 | * flag set, the thread will pause and acquire the | |
666 | * siglock that we hold now and until we've queued | |
667 | * the pending signal. | |
668 | * | |
669 | * Wake up the stopped thread _after_ setting | |
670 | * TIF_SIGPENDING | |
671 | */ | |
672 | state = TASK_STOPPED; | |
673 | if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { | |
674 | set_tsk_thread_flag(t, TIF_SIGPENDING); | |
675 | state |= TASK_INTERRUPTIBLE; | |
676 | } | |
677 | wake_up_state(t, state); | |
678 | ||
679 | t = next_thread(t); | |
680 | } while (t != p); | |
681 | ||
682 | if (p->signal->flags & SIGNAL_STOP_STOPPED) { | |
683 | /* | |
684 | * We were in fact stopped, and are now continued. | |
685 | * Notify the parent with CLD_CONTINUED. | |
686 | */ | |
687 | p->signal->flags = SIGNAL_STOP_CONTINUED; | |
688 | p->signal->group_exit_code = 0; | |
689 | spin_unlock(&p->sighand->siglock); | |
a1d5e21e | 690 | do_notify_parent_cldstop(p, CLD_CONTINUED); |
1da177e4 LT |
691 | spin_lock(&p->sighand->siglock); |
692 | } else { | |
693 | /* | |
694 | * We are not stopped, but there could be a stop | |
695 | * signal in the middle of being processed after | |
696 | * being removed from the queue. Clear that too. | |
697 | */ | |
698 | p->signal->flags = 0; | |
699 | } | |
700 | } else if (sig == SIGKILL) { | |
701 | /* | |
702 | * Make sure that any pending stop signal already dequeued | |
703 | * is undone by the wakeup for SIGKILL. | |
704 | */ | |
705 | p->signal->flags = 0; | |
706 | } | |
707 | } | |
708 | ||
709 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |
710 | struct sigpending *signals) | |
711 | { | |
712 | struct sigqueue * q = NULL; | |
713 | int ret = 0; | |
714 | ||
715 | /* | |
716 | * fast-pathed signals for kernel-internal things like SIGSTOP | |
717 | * or SIGKILL. | |
718 | */ | |
b67a1b9e | 719 | if (info == SEND_SIG_FORCED) |
1da177e4 LT |
720 | goto out_set; |
721 | ||
722 | /* Real-time signals must be queued if sent by sigqueue, or | |
723 | some other real-time mechanism. It is implementation | |
724 | defined whether kill() does so. We attempt to do so, on | |
725 | the principle of least surprise, but since kill is not | |
726 | allowed to fail with EAGAIN when low on memory we just | |
727 | make sure at least one signal gets delivered and don't | |
728 | pass on the info struct. */ | |
729 | ||
730 | q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && | |
621d3121 | 731 | (is_si_special(info) || |
1da177e4 LT |
732 | info->si_code >= 0))); |
733 | if (q) { | |
734 | list_add_tail(&q->list, &signals->list); | |
735 | switch ((unsigned long) info) { | |
b67a1b9e | 736 | case (unsigned long) SEND_SIG_NOINFO: |
1da177e4 LT |
737 | q->info.si_signo = sig; |
738 | q->info.si_errno = 0; | |
739 | q->info.si_code = SI_USER; | |
740 | q->info.si_pid = current->pid; | |
741 | q->info.si_uid = current->uid; | |
742 | break; | |
b67a1b9e | 743 | case (unsigned long) SEND_SIG_PRIV: |
1da177e4 LT |
744 | q->info.si_signo = sig; |
745 | q->info.si_errno = 0; | |
746 | q->info.si_code = SI_KERNEL; | |
747 | q->info.si_pid = 0; | |
748 | q->info.si_uid = 0; | |
749 | break; | |
750 | default: | |
751 | copy_siginfo(&q->info, info); | |
752 | break; | |
753 | } | |
621d3121 ON |
754 | } else if (!is_si_special(info)) { |
755 | if (sig >= SIGRTMIN && info->si_code != SI_USER) | |
1da177e4 LT |
756 | /* |
757 | * Queue overflow, abort. We may abort if the signal was rt | |
758 | * and sent by user using something other than kill(). | |
759 | */ | |
760 | return -EAGAIN; | |
1da177e4 LT |
761 | } |
762 | ||
763 | out_set: | |
764 | sigaddset(&signals->signal, sig); | |
765 | return ret; | |
766 | } | |
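The queueing rule in the comment above is observable from user space: real-time signals sent with sigqueue(3) are queued individually, each carrying its payload, while a classic signal pends at most once (the LEGACY_QUEUE test just below). A sketch:

```c
/*
 * Three sigqueue() sends of the same RT signal stay queued as
 * three entries, payloads delivered in order. Sketch only.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	siginfo_t si;
	union sigval v;
	int i;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		v.sival_int = i;
		sigqueue(getpid(), SIGRTMIN, v);
	}

	for (i = 0; i < 3; i++) {
		sigwaitinfo(&set, &si);
		printf("payload %d\n", si.si_value.sival_int);	/* 0 1 2 */
	}
	return 0;
}
```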
767 | ||
768 | #define LEGACY_QUEUE(sigptr, sig) \ | |
769 | (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) | |
770 | ||
771 | ||
772 | static int | |
773 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) | |
774 | { | |
775 | int ret = 0; | |
776 | ||
fda8bd78 | 777 | BUG_ON(!irqs_disabled()); |
1da177e4 LT |
778 | assert_spin_locked(&t->sighand->siglock); |
779 | ||
1da177e4 LT |
780 | /* Short-circuit ignored signals. */ |
781 | if (sig_ignored(t, sig)) | |
782 | goto out; | |
783 | ||
784 | /* Support queueing exactly one non-rt signal, so that we | |
785 | can get more detailed information about the cause of | |
786 | the signal. */ | |
787 | if (LEGACY_QUEUE(&t->pending, sig)) | |
788 | goto out; | |
789 | ||
790 | ret = send_signal(sig, info, t, &t->pending); | |
791 | if (!ret && !sigismember(&t->blocked, sig)) | |
792 | signal_wake_up(t, sig == SIGKILL); | |
793 | out: | |
794 | return ret; | |
795 | } | |
796 | ||
797 | /* | |
798 | * Force a signal that the process can't ignore: if necessary | |
799 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | |
ae74c3b6 LT |
800 | * |
801 | * Note: If we unblock the signal, we always reset it to SIG_DFL, | |
802 | * since we do not want to have a signal handler that was blocked | |
803 | * be invoked when user space had explicitly blocked it. | |
804 | * | |
805 | * We don't want to have recursive SIGSEGV's etc, for example. | |
1da177e4 | 806 | */ |
1da177e4 LT |
807 | int |
808 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) | |
809 | { | |
810 | unsigned long int flags; | |
ae74c3b6 LT |
811 | int ret, blocked, ignored; |
812 | struct k_sigaction *action; | |
1da177e4 LT |
813 | |
814 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
ae74c3b6 LT |
815 | action = &t->sighand->action[sig-1]; |
816 | ignored = action->sa.sa_handler == SIG_IGN; | |
817 | blocked = sigismember(&t->blocked, sig); | |
818 | if (blocked || ignored) { | |
819 | action->sa.sa_handler = SIG_DFL; | |
820 | if (blocked) { | |
821 | sigdelset(&t->blocked, sig); | |
822 | recalc_sigpending_tsk(t); | |
823 | } | |
1da177e4 LT |
824 | } |
825 | ret = specific_send_sig_info(sig, info, t); | |
826 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | |
827 | ||
828 | return ret; | |
829 | } | |
830 | ||
831 | void | |
832 | force_sig_specific(int sig, struct task_struct *t) | |
833 | { | |
b0423a0d | 834 | force_sig_info(sig, SEND_SIG_FORCED, t); |
1da177e4 LT |
835 | } |
836 | ||
837 | /* | |
838 | * Test if P wants to take SIG. After we've checked all threads with this, | |
839 | * it's equivalent to finding no threads not blocking SIG. Any threads not | |
840 | * blocking SIG were ruled out because they are not running and already | |
841 | * have pending signals. Such threads will dequeue from the shared queue | |
842 | * as soon as they're available, so putting the signal on the shared queue | |
843 | * will be equivalent to sending it to one such thread. | |
844 | */ | |
188a1eaf LT |
845 | static inline int wants_signal(int sig, struct task_struct *p) |
846 | { | |
847 | if (sigismember(&p->blocked, sig)) | |
848 | return 0; | |
849 | if (p->flags & PF_EXITING) | |
850 | return 0; | |
851 | if (sig == SIGKILL) | |
852 | return 1; | |
853 | if (p->state & (TASK_STOPPED | TASK_TRACED)) | |
854 | return 0; | |
855 | return task_curr(p) || !signal_pending(p); | |
856 | } | |
1da177e4 LT |
857 | |
858 | static void | |
859 | __group_complete_signal(int sig, struct task_struct *p) | |
860 | { | |
1da177e4 LT |
861 | struct task_struct *t; |
862 | ||
1da177e4 LT |
863 | /* |
864 | * Now find a thread we can wake up to take the signal off the queue. | |
865 | * | |
866 | * If the main thread wants the signal, it gets first crack. | |
867 | * Probably the least surprising to the average bear. | |
868 | */ | |
188a1eaf | 869 | if (wants_signal(sig, p)) |
1da177e4 LT |
870 | t = p; |
871 | else if (thread_group_empty(p)) | |
872 | /* | |
873 | * There is just one thread and it does not need to be woken. | |
874 | * It will dequeue unblocked signals before it runs again. | |
875 | */ | |
876 | return; | |
877 | else { | |
878 | /* | |
879 | * Otherwise try to find a suitable thread. | |
880 | */ | |
881 | t = p->signal->curr_target; | |
882 | if (t == NULL) | |
883 | /* restart balancing at this thread */ | |
884 | t = p->signal->curr_target = p; | |
1da177e4 | 885 | |
188a1eaf | 886 | while (!wants_signal(sig, t)) { |
1da177e4 LT |
887 | t = next_thread(t); |
888 | if (t == p->signal->curr_target) | |
889 | /* | |
890 | * No thread needs to be woken. | |
891 | * Any eligible threads will see | |
892 | * the signal in the queue soon. | |
893 | */ | |
894 | return; | |
895 | } | |
896 | p->signal->curr_target = t; | |
897 | } | |
898 | ||
899 | /* | |
900 | * Found a killable thread. If the signal will be fatal, | |
901 | * then start taking the whole group down immediately. | |
902 | */ | |
903 | if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && | |
904 | !sigismember(&t->real_blocked, sig) && | |
905 | (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { | |
906 | /* | |
907 | * This signal will be fatal to the whole group. | |
908 | */ | |
909 | if (!sig_kernel_coredump(sig)) { | |
910 | /* | |
911 | * Start a group exit and wake everybody up. | |
912 | * This way we don't have other threads | |
913 | * running and doing things after a slower | |
914 | * thread has the fatal signal pending. | |
915 | */ | |
916 | p->signal->flags = SIGNAL_GROUP_EXIT; | |
917 | p->signal->group_exit_code = sig; | |
918 | p->signal->group_stop_count = 0; | |
919 | t = p; | |
920 | do { | |
921 | sigaddset(&t->pending.signal, SIGKILL); | |
922 | signal_wake_up(t, 1); | |
923 | t = next_thread(t); | |
924 | } while (t != p); | |
925 | return; | |
926 | } | |
927 | ||
928 | /* | |
929 | * There will be a core dump. We make all threads other | |
930 | * than the chosen one go into a group stop so that nothing | |
931 | * happens until it gets scheduled, takes the signal off | |
932 | * the shared queue, and does the core dump. This is a | |
933 | * little more complicated than strictly necessary, but it | |
934 | * keeps the signal state that winds up in the core dump | |
935 | * unchanged from the death state, e.g. which thread had | |
936 | * the core-dump signal unblocked. | |
937 | */ | |
938 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | |
939 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); | |
940 | p->signal->group_stop_count = 0; | |
941 | p->signal->group_exit_task = t; | |
942 | t = p; | |
943 | do { | |
944 | p->signal->group_stop_count++; | |
945 | signal_wake_up(t, 0); | |
946 | t = next_thread(t); | |
947 | } while (t != p); | |
948 | wake_up_process(p->signal->group_exit_task); | |
949 | return; | |
950 | } | |
951 | ||
952 | /* | |
953 | * The signal is already in the shared-pending queue. | |
954 | * Tell the chosen thread to wake up and dequeue it. | |
955 | */ | |
956 | signal_wake_up(t, sig == SIGKILL); | |
957 | return; | |
958 | } | |
959 | ||
960 | int | |
961 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |
962 | { | |
963 | int ret = 0; | |
964 | ||
965 | assert_spin_locked(&p->sighand->siglock); | |
966 | handle_stop_signal(sig, p); | |
967 | ||
1da177e4 LT |
968 | /* Short-circuit ignored signals. */ |
969 | if (sig_ignored(p, sig)) | |
970 | return ret; | |
971 | ||
972 | if (LEGACY_QUEUE(&p->signal->shared_pending, sig)) | |
973 | /* This is a non-RT signal and we already have one queued. */ | |
974 | return ret; | |
975 | ||
976 | /* | |
977 | * Put this signal on the shared-pending queue, or fail with EAGAIN. | |
978 | * We always use the shared queue for process-wide signals, | |
979 | * to avoid several races. | |
980 | */ | |
981 | ret = send_signal(sig, info, p, &p->signal->shared_pending); | |
982 | if (unlikely(ret)) | |
983 | return ret; | |
984 | ||
985 | __group_complete_signal(sig, p); | |
986 | return 0; | |
987 | } | |
988 | ||
989 | /* | |
990 | * Nuke all other threads in the group. | |
991 | */ | |
992 | void zap_other_threads(struct task_struct *p) | |
993 | { | |
994 | struct task_struct *t; | |
995 | ||
996 | p->signal->flags = SIGNAL_GROUP_EXIT; | |
997 | p->signal->group_stop_count = 0; | |
998 | ||
999 | if (thread_group_empty(p)) | |
1000 | return; | |
1001 | ||
1002 | for (t = next_thread(p); t != p; t = next_thread(t)) { | |
1003 | /* | |
1004 | * Don't bother with already dead threads | |
1005 | */ | |
1006 | if (t->exit_state) | |
1007 | continue; | |
1008 | ||
1009 | /* | |
1010 | * We don't want to notify the parent, since we are | |
1011 | * killed as part of a thread group due to another | |
1012 | * thread doing an execve() or similar. So set the | |
1013 | * exit signal to -1 to allow immediate reaping of | |
1014 | * the process. But don't detach the thread group | |
1015 | * leader. | |
1016 | */ | |
1017 | if (t != p->group_leader) | |
1018 | t->exit_signal = -1; | |
1019 | ||
30e0fca6 | 1020 | /* SIGKILL will be handled before any pending SIGSTOP */ |
1da177e4 | 1021 | sigaddset(&t->pending.signal, SIGKILL); |
1da177e4 LT |
1022 | signal_wake_up(t, 1); |
1023 | } | |
1024 | } | |
1025 | ||
1026 | /* | |
e56d0903 | 1027 | * Must be called under rcu_read_lock() or with tasklist_lock read-held. |
1da177e4 | 1028 | */ |
f63ee72e ON |
1029 | struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) |
1030 | { | |
1031 | struct sighand_struct *sighand; | |
1032 | ||
1033 | for (;;) { | |
1034 | sighand = rcu_dereference(tsk->sighand); | |
1035 | if (unlikely(sighand == NULL)) | |
1036 | break; | |
1037 | ||
1038 | spin_lock_irqsave(&sighand->siglock, *flags); | |
1039 | if (likely(sighand == tsk->sighand)) | |
1040 | break; | |
1041 | spin_unlock_irqrestore(&sighand->siglock, *flags); | |
1042 | } | |
1043 | ||
1044 | return sighand; | |
1045 | } | |
1046 | ||
1da177e4 LT |
1047 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1048 | { | |
1049 | unsigned long flags; | |
1050 | int ret; | |
1051 | ||
1052 | ret = check_kill_permission(sig, info, p); | |
f63ee72e ON |
1053 | |
1054 | if (!ret && sig) { | |
1055 | ret = -ESRCH; | |
1056 | if (lock_task_sighand(p, &flags)) { | |
1057 | ret = __group_send_sig_info(sig, info, p); | |
1058 | unlock_task_sighand(p, &flags); | |
2d89c929 | 1059 | } |
1da177e4 LT |
1060 | } |
1061 | ||
1062 | return ret; | |
1063 | } | |
1064 | ||
1065 | /* | |
c4b92fc1 | 1066 | * kill_pgrp_info() sends a signal to a process group: this is what the tty |
1da177e4 LT |
1067 | * control characters do (^C, ^Z etc) |
1068 | */ | |
1069 | ||
c4b92fc1 | 1070 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1da177e4 LT |
1071 | { |
1072 | struct task_struct *p = NULL; | |
1073 | int retval, success; | |
1074 | ||
1da177e4 LT |
1075 | success = 0; |
1076 | retval = -ESRCH; | |
c4b92fc1 | 1077 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
1da177e4 LT |
1078 | int err = group_send_sig_info(sig, info, p); |
1079 | success |= !err; | |
1080 | retval = err; | |
c4b92fc1 | 1081 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
1da177e4 LT |
1082 | return success ? 0 : retval; |
1083 | } | |
1084 | ||
c4b92fc1 EB |
1085 | int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1086 | { | |
1087 | int retval; | |
1088 | ||
1089 | read_lock(&tasklist_lock); | |
1090 | retval = __kill_pgrp_info(sig, info, pgrp); | |
1091 | read_unlock(&tasklist_lock); | |
1092 | ||
1093 | return retval; | |
1094 | } | |
1095 | ||
1096 | int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) | |
1097 | { | |
1098 | if (pgrp <= 0) | |
1099 | return -EINVAL; | |
1100 | ||
1101 | return __kill_pgrp_info(sig, info, find_pid(pgrp)); | |
1102 | } | |
1103 | ||
1da177e4 LT |
1104 | int |
1105 | kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) | |
1106 | { | |
1107 | int retval; | |
1108 | ||
1109 | read_lock(&tasklist_lock); | |
1110 | retval = __kill_pg_info(sig, info, pgrp); | |
1111 | read_unlock(&tasklist_lock); | |
1112 | ||
1113 | return retval; | |
1114 | } | |
1115 | ||
c4b92fc1 | 1116 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) |
1da177e4 LT |
1117 | { |
1118 | int error; | |
e56d0903 | 1119 | int acquired_tasklist_lock = 0; |
1da177e4 LT |
1120 | struct task_struct *p; |
1121 | ||
e56d0903 | 1122 | rcu_read_lock(); |
a9e88e84 | 1123 | if (unlikely(sig_needs_tasklist(sig))) { |
e56d0903 IM |
1124 | read_lock(&tasklist_lock); |
1125 | acquired_tasklist_lock = 1; | |
1126 | } | |
c4b92fc1 | 1127 | p = pid_task(pid, PIDTYPE_PID); |
1da177e4 LT |
1128 | error = -ESRCH; |
1129 | if (p) | |
1130 | error = group_send_sig_info(sig, info, p); | |
e56d0903 IM |
1131 | if (unlikely(acquired_tasklist_lock)) |
1132 | read_unlock(&tasklist_lock); | |
1133 | rcu_read_unlock(); | |
1da177e4 LT |
1134 | return error; |
1135 | } | |
1136 | ||
c4b92fc1 EB |
1137 | int |
1138 | kill_proc_info(int sig, struct siginfo *info, pid_t pid) | |
1139 | { | |
1140 | int error; | |
1141 | rcu_read_lock(); | |
1142 | error = kill_pid_info(sig, info, find_pid(pid)); | |
1143 | rcu_read_unlock(); | |
1144 | return error; | |
1145 | } | |
1146 | ||
2425c08b EB |
1147 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ |
1148 | int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |
8f95dc58 | 1149 | uid_t uid, uid_t euid, u32 secid) |
46113830 HW |
1150 | { |
1151 | int ret = -EINVAL; | |
1152 | struct task_struct *p; | |
1153 | ||
1154 | if (!valid_signal(sig)) | |
1155 | return ret; | |
1156 | ||
1157 | read_lock(&tasklist_lock); | |
2425c08b | 1158 | p = pid_task(pid, PIDTYPE_PID); |
46113830 HW |
1159 | if (!p) { |
1160 | ret = -ESRCH; | |
1161 | goto out_unlock; | |
1162 | } | |
0811af28 | 1163 | if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) |
46113830 HW |
1164 | && (euid != p->suid) && (euid != p->uid) |
1165 | && (uid != p->suid) && (uid != p->uid)) { | |
1166 | ret = -EPERM; | |
1167 | goto out_unlock; | |
1168 | } | |
8f95dc58 DQ |
1169 | ret = security_task_kill(p, info, sig, secid); |
1170 | if (ret) | |
1171 | goto out_unlock; | |
46113830 HW |
1172 | if (sig && p->sighand) { |
1173 | unsigned long flags; | |
1174 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1175 | ret = __group_send_sig_info(sig, info, p); | |
1176 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1177 | } | |
1178 | out_unlock: | |
1179 | read_unlock(&tasklist_lock); | |
1180 | return ret; | |
1181 | } | |
2425c08b | 1182 | EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); |
1da177e4 LT |
1183 | |
1184 | /* | |
1185 | * kill_something_info() interprets pid in interesting ways just like kill(2). | |
1186 | * | |
1187 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | |
1188 | * is probably wrong. Should make it like BSD or SYSV. | |
1189 | */ | |
1190 | ||
1191 | static int kill_something_info(int sig, struct siginfo *info, int pid) | |
1192 | { | |
1193 | if (!pid) { | |
1194 | return kill_pg_info(sig, info, process_group(current)); | |
1195 | } else if (pid == -1) { | |
1196 | int retval = 0, count = 0; | |
1197 | struct task_struct * p; | |
1198 | ||
1199 | read_lock(&tasklist_lock); | |
1200 | for_each_process(p) { | |
1201 | if (p->pid > 1 && p->tgid != current->tgid) { | |
1202 | int err = group_send_sig_info(sig, info, p); | |
1203 | ++count; | |
1204 | if (err != -EPERM) | |
1205 | retval = err; | |
1206 | } | |
1207 | } | |
1208 | read_unlock(&tasklist_lock); | |
1209 | return count ? retval : -ESRCH; | |
1210 | } else if (pid < 0) { | |
1211 | return kill_pg_info(sig, info, -pid); | |
1212 | } else { | |
1213 | return kill_proc_info(sig, info, pid); | |
1214 | } | |
1215 | } | |
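From user space the four branches above correspond to the pid encodings kill(2) accepts; a short sketch, error handling omitted:

```c
/*
 * The pid encodings of kill(2), matching the branches of
 * kill_something_info() above. Sketch; error handling omitted.
 */
#include <signal.h>
#include <unistd.h>

static void kill_cases(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* pid > 0: that one process */
	kill(0, SIGTERM);	/* pid == 0: caller's process group */
	kill(-pgrp, SIGTERM);	/* pid < -1: process group |pid| */
	kill(-1, SIGTERM);	/* pid == -1: all we may signal,
				 * except pid 1 and ourselves */
}
```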
1216 | ||
1217 | /* | |
1218 | * These are for backward compatibility with the rest of the kernel source. | |
1219 | */ | |
1220 | ||
1221 | /* | |
1222 | * These two are the most common entry points. They send a signal | |
1223 | * just to the specific thread. | |
1224 | */ | |
1225 | int | |
1226 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |
1227 | { | |
1228 | int ret; | |
1229 | unsigned long flags; | |
1230 | ||
1231 | /* | |
1232 | * Make sure legacy kernel users don't send in bad values | |
1233 | * (normal paths check this in check_kill_permission). | |
1234 | */ | |
7ed20e1a | 1235 | if (!valid_signal(sig)) |
1da177e4 LT |
1236 | return -EINVAL; |
1237 | ||
1238 | /* | |
1239 | * We need the tasklist lock even for the specific | |
1240 | * thread case (when we don't need to follow the group | |
1241 | * lists) in order to avoid races with "p->sighand" | |
1242 | * going away or changing from under us. | |
1243 | */ | |
1244 | read_lock(&tasklist_lock); | |
1245 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1246 | ret = specific_send_sig_info(sig, info, p); | |
1247 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1248 | read_unlock(&tasklist_lock); | |
1249 | return ret; | |
1250 | } | |
1251 | ||
b67a1b9e ON |
1252 | #define __si_special(priv) \ |
1253 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | |
1254 | ||
1da177e4 LT |
1255 | int |
1256 | send_sig(int sig, struct task_struct *p, int priv) | |
1257 | { | |
b67a1b9e | 1258 | return send_sig_info(sig, __si_special(priv), p); |
1da177e4 LT |
1259 | } |
1260 | ||
1261 | /* | |
1262 | * This is the entry point for "process-wide" signals. | |
1263 | * They will go to an appropriate thread in the thread group. | |
1264 | */ | |
1265 | int | |
1266 | send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |
1267 | { | |
1268 | int ret; | |
1269 | read_lock(&tasklist_lock); | |
1270 | ret = group_send_sig_info(sig, info, p); | |
1271 | read_unlock(&tasklist_lock); | |
1272 | return ret; | |
1273 | } | |
1274 | ||
1275 | void | |
1276 | force_sig(int sig, struct task_struct *p) | |
1277 | { | |
b67a1b9e | 1278 | force_sig_info(sig, SEND_SIG_PRIV, p); |
1da177e4 LT |
1279 | } |
1280 | ||
1281 | /* | |
1282 | * When things go south during signal handling, we | |
1283 | * will force a SIGSEGV. And if the signal that caused | |
1284 | * the problem was already a SIGSEGV, we'll want to | |
1285 | * make sure we don't even try to deliver the signal.. | |
1286 | */ | |
1287 | int | |
1288 | force_sigsegv(int sig, struct task_struct *p) | |
1289 | { | |
1290 | if (sig == SIGSEGV) { | |
1291 | unsigned long flags; | |
1292 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1293 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | |
1294 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1295 | } | |
1296 | force_sig(SIGSEGV, p); | |
1297 | return 0; | |
1298 | } | |
1299 | ||
c4b92fc1 EB |
1300 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1301 | { | |
1302 | return kill_pgrp_info(sig, __si_special(priv), pid); | |
1303 | } | |
1304 | EXPORT_SYMBOL(kill_pgrp); | |
1305 | ||
1306 | int kill_pid(struct pid *pid, int sig, int priv) | |
1307 | { | |
1308 | return kill_pid_info(sig, __si_special(priv), pid); | |
1309 | } | |
1310 | EXPORT_SYMBOL(kill_pid); | |
1311 | ||
1da177e4 LT |
1312 | int |
1313 | kill_pg(pid_t pgrp, int sig, int priv) | |
1314 | { | |
b67a1b9e | 1315 | return kill_pg_info(sig, __si_special(priv), pgrp); |
1da177e4 LT |
1316 | } |
1317 | ||
1318 | int | |
1319 | kill_proc(pid_t pid, int sig, int priv) | |
1320 | { | |
b67a1b9e | 1321 | return kill_proc_info(sig, __si_special(priv), pid); |
1da177e4 LT |
1322 | } |
1323 | ||
1324 | /* | |
1325 | * These functions support sending signals using preallocated sigqueue | |
1326 | * structures. This is needed "because realtime applications cannot | |
1327 | * afford to lose notifications of asynchronous events, like timer | |
1328 | * expirations or I/O completions". In the case of POSIX timers | |
1329 | * we allocate the sigqueue structure in timer_create(). If this | |
1330 | * allocation fails we are able to report the failure to the application | |
1331 | * with an EAGAIN error. | |
1332 | */ | |
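A hedged sketch of the life cycle this comment describes, loosely following the posix-timers usage; `struct my_timer` stands in for the real `struct k_itimer` and its fields are illustrative:

```c
/*
 * Preallocated-sigqueue life cycle: allocate at create time so an
 * expiry can never fail for lack of memory, send at each expiry,
 * free at delete. my_timer is a stand-in for struct k_itimer.
 */
struct my_timer {
	struct sigqueue *sigq;
	struct task_struct *target;
	int signo;
};

static int my_timer_create(struct my_timer *t)
{
	t->sigq = sigqueue_alloc();
	return t->sigq ? 0 : -EAGAIN;	/* report failure up front */
}

static void my_timer_expire(struct my_timer *t)
{
	/* cannot fail with EAGAIN: the queue entry already exists */
	send_sigqueue(t->signo, t->sigq, t->target);
}

static void my_timer_delete(struct my_timer *t)
{
	sigqueue_free(t->sigq);
}
```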
1333 | ||
1334 | struct sigqueue *sigqueue_alloc(void) | |
1335 | { | |
1336 | struct sigqueue *q; | |
1337 | ||
1338 | if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) | |
1339 | q->flags |= SIGQUEUE_PREALLOC; | |
1340 | return(q); | |
1341 | } | |
1342 | ||
1343 | void sigqueue_free(struct sigqueue *q) | |
1344 | { | |
1345 | unsigned long flags; | |
1346 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | |
1347 | /* | |
1348 | * If the signal is still pending remove it from the | |
1349 | * pending queue. | |
1350 | */ | |
1351 | if (unlikely(!list_empty(&q->list))) { | |
19a4fcb5 ON |
1352 | spinlock_t *lock = ¤t->sighand->siglock; |
1353 | read_lock(&tasklist_lock); | |
1354 | spin_lock_irqsave(lock, flags); | |
1da177e4 LT |
1355 | if (!list_empty(&q->list)) |
1356 | list_del_init(&q->list); | |
19a4fcb5 | 1357 | spin_unlock_irqrestore(lock, flags); |
1da177e4 LT |
1358 | read_unlock(&tasklist_lock); |
1359 | } | |
1360 | q->flags &= ~SIGQUEUE_PREALLOC; | |
1361 | __sigqueue_free(q); | |
1362 | } | |
1363 | ||
54767908 | 1364 | int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
1da177e4 LT |
1365 | { |
1366 | unsigned long flags; | |
1367 | int ret = 0; | |
1368 | ||
1da177e4 | 1369 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
e56d0903 IM |
1370 | |
1371 | /* | |
1372 | * The rcu based delayed sighand destroy makes it possible to | |
1373 | * run this without tasklist lock held. The task struct itself | |
1374 | * cannot go away as create_timer did get_task_struct(). | |
1375 | * | |
1376 | * We return -1 when the task is marked exiting, so | |
1377 | * posix_timer_event can redirect it to the group leader | |
1378 | */ | |
1379 | rcu_read_lock(); | |
e752dd6c | 1380 | |
54767908 | 1381 | if (!likely(lock_task_sighand(p, &flags))) { |
e752dd6c ON |
1382 | ret = -1; |
1383 | goto out_err; | |
1384 | } | |
1385 | ||
1da177e4 LT |
1386 | if (unlikely(!list_empty(&q->list))) { |
1387 | /* | |
1388 | * If an SI_TIMER entry is already queued, just increment | |
1389 | * the overrun count. | |
1390 | */ | |
54767908 | 1391 | BUG_ON(q->info.si_code != SI_TIMER); |
1da177e4 LT |
1392 | q->info.si_overrun++; |
1393 | goto out; | |
e752dd6c | 1394 | } |
1da177e4 LT |
1395 | /* Short-circuit ignored signals. */ |
1396 | if (sig_ignored(p, sig)) { | |
1397 | ret = 1; | |
1398 | goto out; | |
1399 | } | |
1400 | ||
1da177e4 LT |
1401 | list_add_tail(&q->list, &p->pending.list); |
1402 | sigaddset(&p->pending.signal, sig); | |
1403 | if (!sigismember(&p->blocked, sig)) | |
1404 | signal_wake_up(p, sig == SIGKILL); | |
1405 | ||
1406 | out: | |
54767908 | 1407 | unlock_task_sighand(p, &flags); |
e752dd6c | 1408 | out_err: |
e56d0903 | 1409 | rcu_read_unlock(); |
e752dd6c ON |
1410 | |
1411 | return ret; | |
1da177e4 LT |
1412 | } |
1413 | ||
1414 | int | |
1415 | send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | |
1416 | { | |
1417 | unsigned long flags; | |
1418 | int ret = 0; | |
1419 | ||
1420 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | |
e56d0903 | 1421 | |
1da177e4 | 1422 | read_lock(&tasklist_lock); |
e56d0903 | 1423 | /* Since it_lock is held, p->sighand cannot be NULL. */ |
1da177e4 LT |
1424 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1425 | handle_stop_signal(sig, p); | |
1426 | ||
1427 | /* Short-circuit ignored signals. */ | |
1428 | if (sig_ignored(p, sig)) { | |
1429 | ret = 1; | |
1430 | goto out; | |
1431 | } | |
1432 | ||
1433 | if (unlikely(!list_empty(&q->list))) { | |
1434 | /* | |
1435 | * If an SI_TIMER entry is already queued, just increment | |
1436 | * the overrun count. Other uses should not try to | |
1437 | * send the signal multiple times. | |
1438 | */ | |
fda8bd78 | 1439 | BUG_ON(q->info.si_code != SI_TIMER); |
1da177e4 LT |
1440 | q->info.si_overrun++; |
1441 | goto out; | |
1442 | } | |
1443 | ||
1444 | /* | |
1445 | * Put this signal on the shared-pending queue. | |
1446 | * We always use the shared queue for process-wide signals, | |
1447 | * to avoid several races. | |
1448 | */ | |
1da177e4 LT |
1449 | list_add_tail(&q->list, &p->signal->shared_pending.list); |
1450 | sigaddset(&p->signal->shared_pending.signal, sig); | |
1451 | ||
1452 | __group_complete_signal(sig, p); | |
1453 | out: | |
1454 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1455 | read_unlock(&tasklist_lock); | |
e56d0903 | 1456 | return ret; |
1da177e4 LT |
1457 | } |
1458 | ||
1459 | /* | |
1460 | * Wake up any threads in the parent blocked in wait* syscalls. | |
1461 | */ | |
1462 | static inline void __wake_up_parent(struct task_struct *p, | |
1463 | struct task_struct *parent) | |
1464 | { | |
1465 | wake_up_interruptible_sync(&parent->signal->wait_chldexit); | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * Let a parent know about the death of a child. | |
1470 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | |
1471 | */ | |
1472 | ||
1473 | void do_notify_parent(struct task_struct *tsk, int sig) | |
1474 | { | |
1475 | struct siginfo info; | |
1476 | unsigned long flags; | |
1477 | struct sighand_struct *psig; | |
1478 | ||
1479 | BUG_ON(sig == -1); | |
1480 | ||
1481 | /* do_notify_parent_cldstop should have been called instead. */ | |
1482 | BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED)); | |
1483 | ||
1484 | BUG_ON(!tsk->ptrace && | |
1485 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); | |
1486 | ||
1487 | info.si_signo = sig; | |
1488 | info.si_errno = 0; | |
1489 | info.si_pid = tsk->pid; | |
1490 | info.si_uid = tsk->uid; | |
1491 | ||
1492 | /* FIXME: find out whether or not this is supposed to be c*time. */ | |
1493 | info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, | |
1494 | tsk->signal->utime)); | |
1495 | info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, | |
1496 | tsk->signal->stime)); | |
1497 | ||
1498 | info.si_status = tsk->exit_code & 0x7f; | |
1499 | if (tsk->exit_code & 0x80) | |
1500 | info.si_code = CLD_DUMPED; | |
1501 | else if (tsk->exit_code & 0x7f) | |
1502 | info.si_code = CLD_KILLED; | |
1503 | else { | |
1504 | info.si_code = CLD_EXITED; | |
1505 | info.si_status = tsk->exit_code >> 8; | |
1506 | } | |
1507 | ||
1508 | psig = tsk->parent->sighand; | |
1509 | spin_lock_irqsave(&psig->siglock, flags); | |
7ed0175a | 1510 | if (!tsk->ptrace && sig == SIGCHLD && |
1da177e4 LT |
1511 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1512 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | |
1513 | /* | |
1514 | * We are exiting and our parent doesn't care. POSIX.1 | |
1515 | * defines special semantics for setting SIGCHLD to SIG_IGN | |
1516 | * or setting the SA_NOCLDWAIT flag: we should be reaped | |
1517 | * automatically and not left for our parent's wait4 call. | |
1518 | * Rather than having the parent do it as a magic kind of | |
1519 | * signal handler, we just set this to tell do_exit that we | |
1520 | * can be cleaned up without becoming a zombie. Note that | |
1521 | * we still call __wake_up_parent in this case, because a | |
1522 | * blocked sys_wait4 might now return -ECHILD. | |
1523 | * | |
1524 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | |
1525 | * is implementation-defined: we do (if you don't want | |
1526 | * it, just use SIG_IGN instead). | |
1527 | */ | |
1528 | tsk->exit_signal = -1; | |
1529 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | |
1530 | sig = 0; | |
1531 | } | |
7ed20e1a | 1532 | if (valid_signal(sig) && sig > 0) |
1da177e4 LT |
1533 | __group_send_sig_info(sig, &info, tsk->parent); |
1534 | __wake_up_parent(tsk, tsk->parent); | |
1535 | spin_unlock_irqrestore(&psig->siglock, flags); | |
1536 | } | |
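The auto-reap path above (`tsk->exit_signal = -1`) has a user-visible counterpart: with SIGCHLD set to SIG_IGN, dead children leave no zombies and wait() ends up failing with ECHILD. A sketch:

```c
/*
 * With SIGCHLD ignored, the child is reaped automatically and
 * wait() reports ECHILD instead of returning its status.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* request auto-reaping */

	if (fork() == 0)
		_exit(0);		/* child exits at once */

	sleep(1);			/* let the kernel reap it */
	if (wait(NULL) < 0 && errno == ECHILD)
		printf("child auto-reaped, no zombie\n");
	return 0;
}
```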
1537 | ||
a1d5e21e | 1538 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) |
1da177e4 LT |
1539 | { |
1540 | struct siginfo info; | |
1541 | unsigned long flags; | |
bc505a47 | 1542 | struct task_struct *parent; |
1da177e4 LT |
1543 | struct sighand_struct *sighand; |
1544 | ||
a1d5e21e | 1545 | if (tsk->ptrace & PT_PTRACED) |
bc505a47 ON |
1546 | parent = tsk->parent; |
1547 | else { | |
1548 | tsk = tsk->group_leader; | |
1549 | parent = tsk->real_parent; | |
1550 | } | |
1551 | ||
1da177e4 LT |
1552 | info.si_signo = SIGCHLD; |
1553 | info.si_errno = 0; | |
1554 | info.si_pid = tsk->pid; | |
1555 | info.si_uid = tsk->uid; | |
1556 | ||
1557 | /* FIXME: find out whether or not this is supposed to be c*time. */ | |
1558 | info.si_utime = cputime_to_jiffies(tsk->utime); | |
1559 | info.si_stime = cputime_to_jiffies(tsk->stime); | |
1560 | ||
1561 | info.si_code = why; | |
1562 | switch (why) { | |
1563 | case CLD_CONTINUED: | |
1564 | info.si_status = SIGCONT; | |
1565 | break; | |
1566 | case CLD_STOPPED: | |
1567 | info.si_status = tsk->signal->group_exit_code & 0x7f; | |
1568 | break; | |
1569 | case CLD_TRAPPED: | |
1570 | info.si_status = tsk->exit_code & 0x7f; | |
1571 | break; | |
1572 | default: | |
1573 | BUG(); | |
1574 | } | |
1575 | ||
1576 | sighand = parent->sighand; | |
1577 | spin_lock_irqsave(&sighand->siglock, flags); | |
1578 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | |
1579 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | |
1580 | __group_send_sig_info(SIGCHLD, &info, parent); | |
1581 | /* | |
1582 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | |
1583 | */ | |
1584 | __wake_up_parent(tsk, parent); | |
1585 | spin_unlock_irqrestore(&sighand->siglock, flags); | |
1586 | } | |
1587 | ||
d5f70c00 ON |
1588 | static inline int may_ptrace_stop(void) |
1589 | { | |
1590 | if (!likely(current->ptrace & PT_PTRACED)) | |
1591 | return 0; | |
1592 | ||
1593 | if (unlikely(current->parent == current->real_parent && | |
1594 | (current->ptrace & PT_ATTACHED))) | |
1595 | return 0; | |
1596 | ||
1597 | if (unlikely(current->signal == current->parent->signal) && | |
1598 | unlikely(current->signal->flags & SIGNAL_GROUP_EXIT)) | |
1599 | return 0; | |
1600 | ||
1601 | /* | |
1602 | * Are we in the middle of do_coredump? | |
1603 | * If so and our tracer is also part of the coredump, stopping | |
1604 | * would be a deadlock, and pointless anyway because our tracer | |
1605 | * is dead, so don't allow us to stop. | |
1606 | * If SIGKILL was already sent before the caller unlocked | |
1607 | * ->siglock we must see ->core_waiters != 0. Otherwise it | |
1608 | * is safe to enter schedule(). | |
1609 | */ | |
1610 | if (unlikely(current->mm->core_waiters) && | |
1611 | unlikely(current->mm == current->parent->mm)) | |
1612 | return 0; | |
1613 | ||
1614 | return 1; | |
1615 | } | |
1616 | ||
1da177e4 LT |
1617 | /* |
1618 | * This must be called with current->sighand->siglock held. | |
1619 | * | |
1620 | * This should be the path for all ptrace stops. | |
1621 | * We always set current->last_siginfo while stopped here. | |
1622 | * That makes it a way to test a stopped process for | |
1623 | * being ptrace-stopped vs being job-control-stopped. | |
1624 | * | |
1625 | * If we actually decide not to stop at all because the tracer is gone, | |
1626 | * we leave nostop_code in current->exit_code. | |
1627 | */ | |
1628 | static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) | |
1629 | { | |
1630 | /* | |
1631 | * If there is a group stop in progress, | |
1632 | * we must participate in the bookkeeping. | |
1633 | */ | |
1634 | if (current->signal->group_stop_count > 0) | |
1635 | --current->signal->group_stop_count; | |
1636 | ||
1637 | current->last_siginfo = info; | |
1638 | current->exit_code = exit_code; | |
1639 | ||
1640 | /* Let the debugger run. */ | |
1641 | set_current_state(TASK_TRACED); | |
1642 | spin_unlock_irq(¤t->sighand->siglock); | |
85b6bce3 | 1643 | try_to_freeze(); |
1da177e4 | 1644 | read_lock(&tasklist_lock); |
d5f70c00 | 1645 | if (may_ptrace_stop()) { |
a1d5e21e | 1646 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
1da177e4 LT |
1647 | read_unlock(&tasklist_lock); |
1648 | schedule(); | |
1649 | } else { | |
1650 | /* | |
1651 | * By the time we got the lock, our tracer went away. | |
1652 | * Don't stop here. | |
1653 | */ | |
1654 | read_unlock(&tasklist_lock); | |
1655 | set_current_state(TASK_RUNNING); | |
1656 | current->exit_code = nostop_code; | |
1657 | } | |
1658 | ||
1659 | /* | |
1660 | * We are back. Now reacquire the siglock before touching | |
1661 | * last_siginfo, so that we are sure to have synchronized with | |
1662 | * any signal-sending on another CPU that wants to examine it. | |
1663 | */ | |
1664 | spin_lock_irq(¤t->sighand->siglock); | |
1665 | current->last_siginfo = NULL; | |
1666 | ||
1667 | /* | |
1668 | * Queued signals ignored us while we were stopped for tracing. | |
1669 | * So check for any that we should take before resuming user mode. | |
1670 | */ | |
1671 | recalc_sigpending(); | |
1672 | } | |
1673 | ||
1674 | void ptrace_notify(int exit_code) | |
1675 | { | |
1676 | siginfo_t info; | |
1677 | ||
1678 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
1679 | ||
1680 | memset(&info, 0, sizeof info); | |
1681 | info.si_signo = SIGTRAP; | |
1682 | info.si_code = exit_code; | |
1683 | info.si_pid = current->pid; | |
1684 | info.si_uid = current->uid; | |
1685 | ||
1686 | /* Let the debugger run. */ | |
1687 | spin_lock_irq(¤t->sighand->siglock); | |
1688 | ptrace_stop(exit_code, 0, &info); | |
1689 | spin_unlock_irq(¤t->sighand->siglock); | |
1690 | } | |
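/*
 * Example (user-space sketch, not part of this file): a minimal tracer
 * driving the ptrace stop path above. The child asks to be traced and
 * raises SIGSTOP; the parent observes the stop via waitpid() and then
 * resumes it. Error handling is omitted for brevity.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* enter a ptrace stop */
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);		/* child is now in a ptrace stop */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	ptrace(PTRACE_CONT, pid, NULL, NULL);	/* let it run again */
	waitpid(pid, &status, 0);		/* reap the exit */
	return 0;
}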
1691 | ||
1da177e4 LT |
1692 | static void |
1693 | finish_stop(int stop_count) | |
1694 | { | |
1695 | /* | |
1696 | * If there are no other threads in the group, or if there is | |
1697 | * a group stop in progress and we are the last to stop, | |
1698 | * report to the parent. When ptraced, every thread reports itself. | |
1699 | */ | |
a1d5e21e ON |
1700 | if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { |
1701 | read_lock(&tasklist_lock); | |
1702 | do_notify_parent_cldstop(current, CLD_STOPPED); | |
1703 | read_unlock(&tasklist_lock); | |
1704 | } | |
bc505a47 | 1705 | |
1da177e4 LT |
1706 | schedule(); |
1707 | /* | |
1708 | * Now we don't run again until continued. | |
1709 | */ | |
1710 | current->exit_code = 0; | |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * This performs the stopping for SIGSTOP and other stop signals. | |
1715 | * We have to stop all threads in the thread group. | |
1716 | * Returns nonzero if we've actually stopped and released the siglock. | |
1717 | * Returns zero if we didn't stop and still hold the siglock. | |
1718 | */ | |
a122b341 | 1719 | static int do_signal_stop(int signr) |
1da177e4 LT |
1720 | { |
1721 | struct signal_struct *sig = current->signal; | |
dac27f4a | 1722 | int stop_count; |
1da177e4 LT |
1723 | |
1724 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) | |
1725 | return 0; | |
1726 | ||
1727 | if (sig->group_stop_count > 0) { | |
1728 | /* | |
1729 | * There is a group stop in progress. We don't need to | |
1730 | * start another one. | |
1731 | */ | |
1da177e4 | 1732 | stop_count = --sig->group_stop_count; |
dac27f4a | 1733 | } else { |
1da177e4 LT |
1734 | /* |
1735 | * There is no group stop already in progress. | |
a122b341 | 1736 | * We must initiate one now. |
1da177e4 LT |
1737 | */ |
1738 | struct task_struct *t; | |
1739 | ||
a122b341 | 1740 | sig->group_exit_code = signr; |
1da177e4 | 1741 | |
a122b341 ON |
1742 | stop_count = 0; |
1743 | for (t = next_thread(current); t != current; t = next_thread(t)) | |
1da177e4 | 1744 | /* |
a122b341 ON |
1745 | * Setting state to TASK_STOPPED for a group |
1746 | * stop is always done with the siglock held, | |
1747 | * so this check has no races. | |
1da177e4 | 1748 | */ |
a122b341 ON |
1749 | if (!t->exit_state && |
1750 | !(t->state & (TASK_STOPPED|TASK_TRACED))) { | |
1751 | stop_count++; | |
1752 | signal_wake_up(t, 0); | |
1753 | } | |
1754 | sig->group_stop_count = stop_count; | |
1da177e4 LT |
1755 | } |
1756 | ||
dac27f4a ON |
1757 | if (stop_count == 0) |
1758 | sig->flags = SIGNAL_STOP_STOPPED; | |
1759 | current->exit_code = sig->group_exit_code; | |
1760 | __set_current_state(TASK_STOPPED); | |
1761 | ||
1762 | spin_unlock_irq(¤t->sighand->siglock); | |
1da177e4 LT |
1763 | finish_stop(stop_count); |
1764 | return 1; | |
1765 | } | |
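/*
 * Example (user-space sketch, not part of this file): the group stop
 * implemented above, as seen through the job-control interface. The
 * parent stops the child, observes the stop via waitpid(WUNTRACED),
 * then continues it. Assumes Linux support for WCONTINUED.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		pause();			/* sit and wait for signals */
		_exit(0);
	}

	int status;
	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);	/* reports the stop */
	if (WIFSTOPPED(status))
		printf("child stopped by %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);			/* wakes the whole group */
	waitpid(pid, &status, WCONTINUED);	/* reports the continue */

	kill(pid, SIGTERM);			/* clean up */
	waitpid(pid, &status, 0);
	return 0;
}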
1766 | ||
1767 | /* | |
1768 | * Do appropriate magic when group_stop_count > 0. | |
1769 | * We return nonzero if we stopped, after releasing the siglock. | |
1770 | * We return zero if we still hold the siglock and should look | |
1771 | * for another signal without checking group_stop_count again. | |
1772 | */ | |
858119e1 | 1773 | static int handle_group_stop(void) |
1da177e4 LT |
1774 | { |
1775 | int stop_count; | |
1776 | ||
1777 | if (current->signal->group_exit_task == current) { | |
1778 | /* | |
1779 | * Group stop is so we can do a core dump; | |
1780 | * we are the initiating thread, so get on with it. | |
1781 | */ | |
1782 | current->signal->group_exit_task = NULL; | |
1783 | return 0; | |
1784 | } | |
1785 | ||
1786 | if (current->signal->flags & SIGNAL_GROUP_EXIT) | |
1787 | /* | |
1788 | * Group stop is so another thread can do a core dump, | |
1789 | * or else we are racing against a death signal. | |
1790 | * Just punt the stop so we can get the next signal. | |
1791 | */ | |
1792 | return 0; | |
1793 | ||
1794 | /* | |
1795 | * There is a group stop in progress. We stop | |
1796 | * without any associated signal being in our queue. | |
1797 | */ | |
1798 | stop_count = --current->signal->group_stop_count; | |
1799 | if (stop_count == 0) | |
1800 | current->signal->flags = SIGNAL_STOP_STOPPED; | |
1801 | current->exit_code = current->signal->group_exit_code; | |
1802 | set_current_state(TASK_STOPPED); | |
1803 | spin_unlock_irq(¤t->sighand->siglock); | |
1804 | finish_stop(stop_count); | |
1805 | return 1; | |
1806 | } | |
1807 | ||
1808 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | |
1809 | struct pt_regs *regs, void *cookie) | |
1810 | { | |
1811 | sigset_t *mask = ¤t->blocked; | |
1812 | int signr = 0; | |
1813 | ||
fc558a74 RW |
1814 | try_to_freeze(); |
1815 | ||
1da177e4 LT |
1816 | relock: |
1817 | spin_lock_irq(¤t->sighand->siglock); | |
1818 | for (;;) { | |
1819 | struct k_sigaction *ka; | |
1820 | ||
1821 | if (unlikely(current->signal->group_stop_count > 0) && | |
1822 | handle_group_stop()) | |
1823 | goto relock; | |
1824 | ||
1825 | signr = dequeue_signal(current, mask, info); | |
1826 | ||
1827 | if (!signr) | |
1828 | break; /* will return 0 */ | |
1829 | ||
1830 | if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { | |
1831 | ptrace_signal_deliver(regs, cookie); | |
1832 | ||
1833 | /* Let the debugger run. */ | |
1834 | ptrace_stop(signr, signr, info); | |
1835 | ||
e57a5059 | 1836 | /* We're back. Did the debugger cancel the sig? */ |
1da177e4 | 1837 | signr = current->exit_code; |
e57a5059 | 1838 | if (signr == 0) |
1da177e4 LT |
1839 | continue; |
1840 | ||
1841 | current->exit_code = 0; | |
1842 | ||
1843 | /* Update the siginfo structure if the signal has | |
1844 | changed. If the debugger wanted something | |
1845 | specific in the siginfo structure then it should | |
1846 | have updated *info via PTRACE_SETSIGINFO. */ | |
1847 | if (signr != info->si_signo) { | |
1848 | info->si_signo = signr; | |
1849 | info->si_errno = 0; | |
1850 | info->si_code = SI_USER; | |
1851 | info->si_pid = current->parent->pid; | |
1852 | info->si_uid = current->parent->uid; | |
1853 | } | |
1854 | ||
1855 | /* If the (new) signal is now blocked, requeue it. */ | |
1856 | if (sigismember(¤t->blocked, signr)) { | |
1857 | specific_send_sig_info(signr, info, current); | |
1858 | continue; | |
1859 | } | |
1860 | } | |
1861 | ||
1862 | ka = ¤t->sighand->action[signr-1]; | |
1863 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | |
1864 | continue; | |
1865 | if (ka->sa.sa_handler != SIG_DFL) { | |
1866 | /* Run the handler. */ | |
1867 | *return_ka = *ka; | |
1868 | ||
1869 | if (ka->sa.sa_flags & SA_ONESHOT) | |
1870 | ka->sa.sa_handler = SIG_DFL; | |
1871 | ||
1872 | break; /* will return non-zero "signr" value */ | |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Now we are doing the default action for this signal. | |
1877 | */ | |
1878 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
1879 | continue; | |
1880 | ||
1881 | /* Init gets no signals it doesn't want. */ | |
fef23e7f | 1882 | if (current == child_reaper) |
1da177e4 LT |
1883 | continue; |
1884 | ||
1885 | if (sig_kernel_stop(signr)) { | |
1886 | /* | |
1887 | * The default action is to stop all threads in | |
1888 | * the thread group. The job control signals | |
1889 | * do nothing in an orphaned pgrp, but SIGSTOP | |
1890 | * always works. Note that siglock needs to be | |
1891 | * dropped during the call to is_orphaned_pgrp() | |
1892 | * because of lock ordering with tasklist_lock. | |
1893 | * This allows an intervening SIGCONT to be posted. | |
1894 | * We need to check for that and bail out if necessary. | |
1895 | */ | |
1896 | if (signr != SIGSTOP) { | |
1897 | spin_unlock_irq(¤t->sighand->siglock); | |
1898 | ||
1899 | /* signals can be posted during this window */ | |
1900 | ||
1901 | if (is_orphaned_pgrp(process_group(current))) | |
1902 | goto relock; | |
1903 | ||
1904 | spin_lock_irq(¤t->sighand->siglock); | |
1905 | } | |
1906 | ||
1907 | if (likely(do_signal_stop(signr))) { | |
1908 | /* It released the siglock. */ | |
1909 | goto relock; | |
1910 | } | |
1911 | ||
1912 | /* | |
1913 | * We didn't actually stop, due to a race | |
1914 | * with SIGCONT or something like that. | |
1915 | */ | |
1916 | continue; | |
1917 | } | |
1918 | ||
1919 | spin_unlock_irq(¤t->sighand->siglock); | |
1920 | ||
1921 | /* | |
1922 | * Anything else is fatal, maybe with a core dump. | |
1923 | */ | |
1924 | current->flags |= PF_SIGNALED; | |
1925 | if (sig_kernel_coredump(signr)) { | |
1926 | /* | |
1927 | * If it was able to dump core, this kills all | |
1928 | * other threads in the group and synchronizes with | |
1929 | * their demise. If we lost the race with another | |
1930 | * thread getting here, it set group_exit_code | |
1931 | * first and our do_group_exit call below will use | |
1932 | * that value and ignore the one we pass it. | |
1933 | */ | |
1934 | do_coredump((long)signr, signr, regs); | |
1935 | } | |
1936 | ||
1937 | /* | |
1938 | * Death signals, no core dump. | |
1939 | */ | |
1940 | do_group_exit(signr); | |
1941 | /* NOTREACHED */ | |
1942 | } | |
1943 | spin_unlock_irq(¤t->sighand->siglock); | |
1944 | return signr; | |
1945 | } | |
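/*
 * Example (user-space sketch, not part of this file): how a debugger
 * exercises the cancellation logic above. At a signal-delivery stop,
 * the last argument of PTRACE_CONT selects the signal the tracee
 * really gets: 0 cancels it, another number replaces it.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* intercepted by the tracer */
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);	/* signal-delivery stop */
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		printf("intercepted SIGUSR1, cancelling it\n");

	ptrace(PTRACE_CONT, pid, NULL, NULL);	/* sig 0: cancel delivery */
	waitpid(pid, &status, 0);	/* child exits normally */
	return 0;
}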
1946 | ||
1da177e4 LT |
1947 | EXPORT_SYMBOL(recalc_sigpending); |
1948 | EXPORT_SYMBOL_GPL(dequeue_signal); | |
1949 | EXPORT_SYMBOL(flush_signals); | |
1950 | EXPORT_SYMBOL(force_sig); | |
1951 | EXPORT_SYMBOL(kill_pg); | |
1952 | EXPORT_SYMBOL(kill_proc); | |
1953 | EXPORT_SYMBOL(ptrace_notify); | |
1954 | EXPORT_SYMBOL(send_sig); | |
1955 | EXPORT_SYMBOL(send_sig_info); | |
1956 | EXPORT_SYMBOL(sigprocmask); | |
1957 | EXPORT_SYMBOL(block_all_signals); | |
1958 | EXPORT_SYMBOL(unblock_all_signals); | |
1959 | ||
1960 | ||
1961 | /* | |
1962 | * System call entry points. | |
1963 | */ | |
1964 | ||
1965 | asmlinkage long sys_restart_syscall(void) | |
1966 | { | |
1967 | struct restart_block *restart = ¤t_thread_info()->restart_block; | |
1968 | return restart->fn(restart); | |
1969 | } | |
1970 | ||
1971 | long do_no_restart_syscall(struct restart_block *param) | |
1972 | { | |
1973 | return -EINTR; | |
1974 | } | |
1975 | ||
1976 | /* | |
1977 | * We don't need to get the kernel lock - this is all local to this | |
1978 | * particular thread. (And that's good, because this is _heavily_ | |
1979 | * used by various programs.) | |
1980 | */ | |
1981 | ||
1982 | /* | |
1983 | * This is also useful for kernel threads that want to temporarily | |
1984 | * (or permanently) block certain signals. | |
1985 | * | |
1986 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
1987 | * interface happily blocks "unblockable" signals like SIGKILL | |
1988 | * and friends. | |
1989 | */ | |
1990 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
1991 | { | |
1992 | int error; | |
1da177e4 LT |
1993 | |
1994 | spin_lock_irq(¤t->sighand->siglock); | |
a26fd335 ON |
1995 | if (oldset) |
1996 | *oldset = current->blocked; | |
1997 | ||
1da177e4 LT |
1998 | error = 0; |
1999 | switch (how) { | |
2000 | case SIG_BLOCK: | |
2001 | sigorsets(¤t->blocked, ¤t->blocked, set); | |
2002 | break; | |
2003 | case SIG_UNBLOCK: | |
2004 | signandsets(¤t->blocked, ¤t->blocked, set); | |
2005 | break; | |
2006 | case SIG_SETMASK: | |
2007 | current->blocked = *set; | |
2008 | break; | |
2009 | default: | |
2010 | error = -EINVAL; | |
2011 | } | |
2012 | recalc_sigpending(); | |
2013 | spin_unlock_irq(¤t->sighand->siglock); | |
a26fd335 | 2014 | |
1da177e4 LT |
2015 | return error; |
2016 | } | |
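/*
 * Example (user-space sketch, not part of this file): the user-mode
 * contrast to the note above -- sigprocmask(2) silently refuses to
 * block SIGKILL and SIGSTOP (see the sigdelsetmask() call in
 * sys_rt_sigprocmask below), as reading back the mask shows.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, cur;

	sigemptyset(&set);
	sigaddset(&set, SIGKILL);		/* stripped by the kernel */
	sigaddset(&set, SIGTERM);		/* actually blocked */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read back the mask */
	printf("SIGTERM blocked: %d, SIGKILL blocked: %d\n",
	       sigismember(&cur, SIGTERM), sigismember(&cur, SIGKILL));
	return 0;
}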
2017 | ||
2018 | asmlinkage long | |
2019 | sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) | |
2020 | { | |
2021 | int error = -EINVAL; | |
2022 | sigset_t old_set, new_set; | |
2023 | ||
2024 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2025 | if (sigsetsize != sizeof(sigset_t)) | |
2026 | goto out; | |
2027 | ||
2028 | if (set) { | |
2029 | error = -EFAULT; | |
2030 | if (copy_from_user(&new_set, set, sizeof(*set))) | |
2031 | goto out; | |
2032 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2033 | ||
2034 | error = sigprocmask(how, &new_set, &old_set); | |
2035 | if (error) | |
2036 | goto out; | |
2037 | if (oset) | |
2038 | goto set_old; | |
2039 | } else if (oset) { | |
2040 | spin_lock_irq(¤t->sighand->siglock); | |
2041 | old_set = current->blocked; | |
2042 | spin_unlock_irq(¤t->sighand->siglock); | |
2043 | ||
2044 | set_old: | |
2045 | error = -EFAULT; | |
2046 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | |
2047 | goto out; | |
2048 | } | |
2049 | error = 0; | |
2050 | out: | |
2051 | return error; | |
2052 | } | |
2053 | ||
2054 | long do_sigpending(void __user *set, unsigned long sigsetsize) | |
2055 | { | |
2056 | long error = -EINVAL; | |
2057 | sigset_t pending; | |
2058 | ||
2059 | if (sigsetsize > sizeof(sigset_t)) | |
2060 | goto out; | |
2061 | ||
2062 | spin_lock_irq(¤t->sighand->siglock); | |
2063 | sigorsets(&pending, ¤t->pending.signal, | |
2064 | ¤t->signal->shared_pending.signal); | |
2065 | spin_unlock_irq(¤t->sighand->siglock); | |
2066 | ||
2067 | /* Outside the lock because only this thread touches it. */ | |
2068 | sigandsets(&pending, ¤t->blocked, &pending); | |
2069 | ||
2070 | error = -EFAULT; | |
2071 | if (!copy_to_user(set, &pending, sigsetsize)) | |
2072 | error = 0; | |
2073 | ||
2074 | out: | |
2075 | return error; | |
2076 | } | |
2077 | ||
2078 | asmlinkage long | |
2079 | sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) | |
2080 | { | |
2081 | return do_sigpending(set, sigsetsize); | |
2082 | } | |
2083 | ||
2084 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER | |
2085 | ||
2086 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |
2087 | { | |
2088 | int err; | |
2089 | ||
2090 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) | |
2091 | return -EFAULT; | |
2092 | if (from->si_code < 0) | |
2093 | return __copy_to_user(to, from, sizeof(siginfo_t)) | |
2094 | ? -EFAULT : 0; | |
2095 | /* | |
2096 | * If you change siginfo_t structure, please be sure | |
2097 | * this code is fixed accordingly. | |
2098 | * It should never copy any pad contained in the structure | |
2099 | * to avoid security leaks, but must copy the generic | |
2100 | * 3 ints plus the relevant union member. | |
2101 | */ | |
2102 | err = __put_user(from->si_signo, &to->si_signo); | |
2103 | err |= __put_user(from->si_errno, &to->si_errno); | |
2104 | err |= __put_user((short)from->si_code, &to->si_code); | |
2105 | switch (from->si_code & __SI_MASK) { | |
2106 | case __SI_KILL: | |
2107 | err |= __put_user(from->si_pid, &to->si_pid); | |
2108 | err |= __put_user(from->si_uid, &to->si_uid); | |
2109 | break; | |
2110 | case __SI_TIMER: | |
2111 | err |= __put_user(from->si_tid, &to->si_tid); | |
2112 | err |= __put_user(from->si_overrun, &to->si_overrun); | |
2113 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2114 | break; | |
2115 | case __SI_POLL: | |
2116 | err |= __put_user(from->si_band, &to->si_band); | |
2117 | err |= __put_user(from->si_fd, &to->si_fd); | |
2118 | break; | |
2119 | case __SI_FAULT: | |
2120 | err |= __put_user(from->si_addr, &to->si_addr); | |
2121 | #ifdef __ARCH_SI_TRAPNO | |
2122 | err |= __put_user(from->si_trapno, &to->si_trapno); | |
2123 | #endif | |
2124 | break; | |
2125 | case __SI_CHLD: | |
2126 | err |= __put_user(from->si_pid, &to->si_pid); | |
2127 | err |= __put_user(from->si_uid, &to->si_uid); | |
2128 | err |= __put_user(from->si_status, &to->si_status); | |
2129 | err |= __put_user(from->si_utime, &to->si_utime); | |
2130 | err |= __put_user(from->si_stime, &to->si_stime); | |
2131 | break; | |
2132 | case __SI_RT: /* This is not generated by the kernel as of now. */ | |
2133 | case __SI_MESGQ: /* But this is */ | |
2134 | err |= __put_user(from->si_pid, &to->si_pid); | |
2135 | err |= __put_user(from->si_uid, &to->si_uid); | |
2136 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2137 | break; | |
2138 | default: /* this is just in case for now ... */ | |
2139 | err |= __put_user(from->si_pid, &to->si_pid); | |
2140 | err |= __put_user(from->si_uid, &to->si_uid); | |
2141 | break; | |
2142 | } | |
2143 | return err; | |
2144 | } | |
2145 | ||
2146 | #endif | |
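/*
 * Example (user-space sketch, not part of this file): the consumer of
 * the copy above -- an SA_SIGINFO handler reading the union members
 * that copy_siginfo_to_user() fills in for a kill-style signal.
 * (printf from a handler is not async-signal-safe; demo only.)
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void info_handler(int sig, siginfo_t *si, void *ctx)
{
	/* si_pid and si_uid are the union members copied for __SI_KILL */
	printf("sig %d from pid %d, uid %d\n",
	       sig, (int)si->si_pid, (int)si->si_uid);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = info_handler,
				.sa_flags = SA_SIGINFO };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	kill(getpid(), SIGUSR1);	/* arrives with si_code == SI_USER */
	return 0;
}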
2147 | ||
2148 | asmlinkage long | |
2149 | sys_rt_sigtimedwait(const sigset_t __user *uthese, | |
2150 | siginfo_t __user *uinfo, | |
2151 | const struct timespec __user *uts, | |
2152 | size_t sigsetsize) | |
2153 | { | |
2154 | int ret, sig; | |
2155 | sigset_t these; | |
2156 | struct timespec ts; | |
2157 | siginfo_t info; | |
2158 | long timeout = 0; | |
2159 | ||
2160 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2161 | if (sigsetsize != sizeof(sigset_t)) | |
2162 | return -EINVAL; | |
2163 | ||
2164 | if (copy_from_user(&these, uthese, sizeof(these))) | |
2165 | return -EFAULT; | |
2166 | ||
2167 | /* | |
2168 | * Invert the set of allowed signals to get those we | |
2169 | * want to block. | |
2170 | */ | |
2171 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2172 | signotset(&these); | |
2173 | ||
2174 | if (uts) { | |
2175 | if (copy_from_user(&ts, uts, sizeof(ts))) | |
2176 | return -EFAULT; | |
2177 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 | |
2178 | || ts.tv_sec < 0) | |
2179 | return -EINVAL; | |
2180 | } | |
2181 | ||
2182 | spin_lock_irq(¤t->sighand->siglock); | |
2183 | sig = dequeue_signal(current, &these, &info); | |
2184 | if (!sig) { | |
2185 | timeout = MAX_SCHEDULE_TIMEOUT; | |
2186 | if (uts) | |
2187 | timeout = (timespec_to_jiffies(&ts) | |
2188 | + (ts.tv_sec || ts.tv_nsec)); | |
2189 | ||
2190 | if (timeout) { | |
2191 | /* None ready -- temporarily unblock those we're | |
2192 | * interested in while we are sleeping, so that we'll | |
2193 | * be awakened when they arrive. */ | |
2194 | current->real_blocked = current->blocked; | |
2195 | sigandsets(¤t->blocked, ¤t->blocked, &these); | |
2196 | recalc_sigpending(); | |
2197 | spin_unlock_irq(¤t->sighand->siglock); | |
2198 | ||
75bcc8c5 | 2199 | timeout = schedule_timeout_interruptible(timeout); |
1da177e4 | 2200 | |
1da177e4 LT |
2201 | spin_lock_irq(¤t->sighand->siglock); |
2202 | sig = dequeue_signal(current, &these, &info); | |
2203 | current->blocked = current->real_blocked; | |
2204 | siginitset(¤t->real_blocked, 0); | |
2205 | recalc_sigpending(); | |
2206 | } | |
2207 | } | |
2208 | spin_unlock_irq(¤t->sighand->siglock); | |
2209 | ||
2210 | if (sig) { | |
2211 | ret = sig; | |
2212 | if (uinfo) { | |
2213 | if (copy_siginfo_to_user(uinfo, &info)) | |
2214 | ret = -EFAULT; | |
2215 | } | |
2216 | } else { | |
2217 | ret = -EAGAIN; | |
2218 | if (timeout) | |
2219 | ret = -EINTR; | |
2220 | } | |
2221 | ||
2222 | return ret; | |
2223 | } | |
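/*
 * Example (user-space sketch, not part of this file): driving the
 * syscall above through sigtimedwait(2). The signal must be blocked
 * first so it stays queued instead of being delivered.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 queued */

	raise(SIGUSR1);				/* now pending */
	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}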
2224 | ||
2225 | asmlinkage long | |
2226 | sys_kill(int pid, int sig) | |
2227 | { | |
2228 | struct siginfo info; | |
2229 | ||
2230 | info.si_signo = sig; | |
2231 | info.si_errno = 0; | |
2232 | info.si_code = SI_USER; | |
2233 | info.si_pid = current->tgid; | |
2234 | info.si_uid = current->uid; | |
2235 | ||
2236 | return kill_something_info(sig, &info, pid); | |
2237 | } | |
2238 | ||
6dd69f10 | 2239 | static int do_tkill(int tgid, int pid, int sig) |
1da177e4 | 2240 | { |
1da177e4 | 2241 | int error; |
6dd69f10 | 2242 | struct siginfo info; |
1da177e4 LT |
2243 | struct task_struct *p; |
2244 | ||
6dd69f10 | 2245 | error = -ESRCH; |
1da177e4 LT |
2246 | info.si_signo = sig; |
2247 | info.si_errno = 0; | |
2248 | info.si_code = SI_TKILL; | |
2249 | info.si_pid = current->tgid; | |
2250 | info.si_uid = current->uid; | |
2251 | ||
2252 | read_lock(&tasklist_lock); | |
2253 | p = find_task_by_pid(pid); | |
6dd69f10 | 2254 | if (p && (tgid <= 0 || p->tgid == tgid)) { |
1da177e4 LT |
2255 | error = check_kill_permission(sig, &info, p); |
2256 | /* | |
2257 | * The null signal is a permissions and process existence | |
2258 | * probe. No signal is actually delivered. | |
2259 | */ | |
2260 | if (!error && sig && p->sighand) { | |
2261 | spin_lock_irq(&p->sighand->siglock); | |
2262 | handle_stop_signal(sig, p); | |
2263 | error = specific_send_sig_info(sig, &info, p); | |
2264 | spin_unlock_irq(&p->sighand->siglock); | |
2265 | } | |
2266 | } | |
2267 | read_unlock(&tasklist_lock); | |
6dd69f10 | 2268 | |
1da177e4 LT |
2269 | return error; |
2270 | } | |
2271 | ||
6dd69f10 VL |
2272 | /** |
2273 | * sys_tgkill - send signal to one specific thread | |
2274 | * @tgid: the thread group ID of the thread | |
2275 | * @pid: the PID of the thread | |
2276 | * @sig: signal to be sent | |
2277 | * | |
2278 | * This syscall also checks the tgid and returns -ESRCH even if the PID | |
2279 | * exists but no longer belongs to the target process. This | |
2280 | * method solves the problem of threads exiting and PIDs getting reused. | |
2281 | */ | |
2282 | asmlinkage long sys_tgkill(int tgid, int pid, int sig) | |
2283 | { | |
2284 | /* This is only valid for single tasks */ | |
2285 | if (pid <= 0 || tgid <= 0) | |
2286 | return -EINVAL; | |
2287 | ||
2288 | return do_tkill(tgid, pid, sig); | |
2289 | } | |
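/*
 * Example (user-space sketch, not part of this file): tgkill commonly
 * has no libc wrapper, so it is reached through syscall(2). Here the
 * caller signals its own thread; the tgid check above would fail with
 * -ESRCH if the tid had been recycled into another process.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);	/* kernel thread id */

	signal(SIGUSR1, SIG_IGN);		/* survive the delivery */
	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) == 0)
		printf("tgkill(%d, %d, SIGUSR1) succeeded\n",
		       (int)tgid, (int)tid);
	return 0;
}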
2290 | ||
1da177e4 LT |
2291 | /* |
2292 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | |
2293 | */ | |
2294 | asmlinkage long | |
2295 | sys_tkill(int pid, int sig) | |
2296 | { | |
1da177e4 LT |
2297 | /* This is only valid for single tasks */ |
2298 | if (pid <= 0) | |
2299 | return -EINVAL; | |
2300 | ||
6dd69f10 | 2301 | return do_tkill(0, pid, sig); |
1da177e4 LT |
2302 | } |
2303 | ||
2304 | asmlinkage long | |
2305 | sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) | |
2306 | { | |
2307 | siginfo_t info; | |
2308 | ||
2309 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | |
2310 | return -EFAULT; | |
2311 | ||
2312 | /* Not even root can pretend to send signals from the kernel. | |
2313 | Nor can they impersonate a kill(), which adds source info. */ | |
2314 | if (info.si_code >= 0) | |
2315 | return -EPERM; | |
2316 | info.si_signo = sig; | |
2317 | ||
2318 | /* POSIX.1b doesn't mention process groups. */ | |
2319 | return kill_proc_info(sig, &info, pid); | |
2320 | } | |
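/*
 * Example (user-space sketch, not part of this file): the usual route
 * into rt_sigqueueinfo is sigqueue(3), which supplies the negative
 * si_code (SI_QUEUE) the check above demands, plus a payload. A
 * self-directed signal is normally handled before sigqueue() returns.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t payload;

static void handler(int sig, siginfo_t *si, void *ctx)
{
	payload = si->si_value.sival_int;	/* the queued payload */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	union sigval v = { .sival_int = 42 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);
	sigqueue(getpid(), SIGRTMIN, v);
	printf("received payload %d\n", (int)payload);
	return 0;
}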
2321 | ||
88531f72 | 2322 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4 LT |
2323 | { |
2324 | struct k_sigaction *k; | |
71fabd5e | 2325 | sigset_t mask; |
1da177e4 | 2326 | |
7ed20e1a | 2327 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4 LT |
2328 | return -EINVAL; |
2329 | ||
2330 | k = ¤t->sighand->action[sig-1]; | |
2331 | ||
2332 | spin_lock_irq(¤t->sighand->siglock); | |
2333 | if (signal_pending(current)) { | |
2334 | /* | |
2335 | * If there might be a fatal signal pending on multiple | |
2336 | * threads, make sure we take it before changing the action. | |
2337 | */ | |
2338 | spin_unlock_irq(¤t->sighand->siglock); | |
2339 | return -ERESTARTNOINTR; | |
2340 | } | |
2341 | ||
2342 | if (oact) | |
2343 | *oact = *k; | |
2344 | ||
2345 | if (act) { | |
9ac95f2f ON |
2346 | sigdelsetmask(&act->sa.sa_mask, |
2347 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
88531f72 | 2348 | *k = *act; |
1da177e4 LT |
2349 | /* |
2350 | * POSIX 3.3.1.3: | |
2351 | * "Setting a signal action to SIG_IGN for a signal that is | |
2352 | * pending shall cause the pending signal to be discarded, | |
2353 | * whether or not it is blocked." | |
2354 | * | |
2355 | * "Setting a signal action to SIG_DFL for a signal that is | |
2356 | * pending and whose default action is to ignore the signal | |
2357 | * (for example, SIGCHLD), shall cause the pending signal to | |
2358 | * be discarded, whether or not it is blocked" | |
2359 | */ | |
2360 | if (act->sa.sa_handler == SIG_IGN || | |
88531f72 | 2361 | (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { |
1da177e4 | 2362 | struct task_struct *t = current; |
71fabd5e GA |
2363 | sigemptyset(&mask); |
2364 | sigaddset(&mask, sig); | |
2365 | rm_from_queue_full(&mask, &t->signal->shared_pending); | |
1da177e4 | 2366 | do { |
71fabd5e | 2367 | rm_from_queue_full(&mask, &t->pending); |
1da177e4 LT |
2368 | recalc_sigpending_tsk(t); |
2369 | t = next_thread(t); | |
2370 | } while (t != current); | |
1da177e4 | 2371 | } |
1da177e4 LT |
2372 | } |
2373 | ||
2374 | spin_unlock_irq(¤t->sighand->siglock); | |
2375 | return 0; | |
2376 | } | |
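/*
 * Example (user-space sketch, not part of this file): the POSIX
 * discard rule quoted above, observed with sigpending(2). A pending
 * (blocked) SIGUSR1 disappears the moment its action becomes SIG_IGN.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* pending but blocked */

	sigpending(&pend);
	printf("pending before SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);		/* discards the pending signal */
	sigpending(&pend);
	printf("pending after SIG_IGN:  %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}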
2377 | ||
2378 | int | |
2379 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) | |
2380 | { | |
2381 | stack_t oss; | |
2382 | int error; | |
2383 | ||
2384 | if (uoss) { | |
2385 | oss.ss_sp = (void __user *) current->sas_ss_sp; | |
2386 | oss.ss_size = current->sas_ss_size; | |
2387 | oss.ss_flags = sas_ss_flags(sp); | |
2388 | } | |
2389 | ||
2390 | if (uss) { | |
2391 | void __user *ss_sp; | |
2392 | size_t ss_size; | |
2393 | int ss_flags; | |
2394 | ||
2395 | error = -EFAULT; | |
2396 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) | |
2397 | || __get_user(ss_sp, &uss->ss_sp) | |
2398 | || __get_user(ss_flags, &uss->ss_flags) | |
2399 | || __get_user(ss_size, &uss->ss_size)) | |
2400 | goto out; | |
2401 | ||
2402 | error = -EPERM; | |
2403 | if (on_sig_stack(sp)) | |
2404 | goto out; | |
2405 | ||
2406 | error = -EINVAL; | |
2407 | /* | |
2408 | * | |
2409 | * Note - this code used to test ss_flags incorrectly: | |
2410 | * old code may have been written using ss_flags==0 | |
2411 | * to mean ss_flags==SS_ONSTACK (as this was the only | |
2412 | * way that worked), so this fix preserves that older | |
2413 | * mechanism. | |
2414 | */ | |
2415 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | |
2416 | goto out; | |
2417 | ||
2418 | if (ss_flags == SS_DISABLE) { | |
2419 | ss_size = 0; | |
2420 | ss_sp = NULL; | |
2421 | } else { | |
2422 | error = -ENOMEM; | |
2423 | if (ss_size < MINSIGSTKSZ) | |
2424 | goto out; | |
2425 | } | |
2426 | ||
2427 | current->sas_ss_sp = (unsigned long) ss_sp; | |
2428 | current->sas_ss_size = ss_size; | |
2429 | } | |
2430 | ||
2431 | if (uoss) { | |
2432 | error = -EFAULT; | |
2433 | if (copy_to_user(uoss, &oss, sizeof(oss))) | |
2434 | goto out; | |
2435 | } | |
2436 | ||
2437 | error = 0; | |
2438 | out: | |
2439 | return error; | |
2440 | } | |
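/*
 * Example (user-space sketch, not part of this file): installing an
 * alternate stack through the path above, then registering a SIGSEGV
 * handler with SA_ONSTACK -- the usual way to survive a stack overflow
 * long enough to report it. Error handling omitted.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* runs on the alternate stack even if the main stack is exhausted */
	_exit(1);
}

int main(void)
{
	stack_t ss = { .ss_flags = 0, .ss_size = SIGSTKSZ };
	struct sigaction sa = { .sa_handler = on_segv,
				.sa_flags = SA_ONSTACK };

	ss.ss_sp = malloc(SIGSTKSZ);
	sigaltstack(&ss, NULL);		/* lands in do_sigaltstack() */

	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	printf("alternate signal stack installed\n");
	return 0;
}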
2441 | ||
2442 | #ifdef __ARCH_WANT_SYS_SIGPENDING | |
2443 | ||
2444 | asmlinkage long | |
2445 | sys_sigpending(old_sigset_t __user *set) | |
2446 | { | |
2447 | return do_sigpending(set, sizeof(*set)); | |
2448 | } | |
2449 | ||
2450 | #endif | |
2451 | ||
2452 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | |
2453 | /* Some platforms have their own version with special arguments; others | |
2454 |    support only sys_rt_sigprocmask. */ | |
2455 | ||
2456 | asmlinkage long | |
2457 | sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) | |
2458 | { | |
2459 | int error; | |
2460 | old_sigset_t old_set, new_set; | |
2461 | ||
2462 | if (set) { | |
2463 | error = -EFAULT; | |
2464 | if (copy_from_user(&new_set, set, sizeof(*set))) | |
2465 | goto out; | |
2466 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
2467 | ||
2468 | spin_lock_irq(¤t->sighand->siglock); | |
2469 | old_set = current->blocked.sig[0]; | |
2470 | ||
2471 | error = 0; | |
2472 | switch (how) { | |
2473 | default: | |
2474 | error = -EINVAL; | |
2475 | break; | |
2476 | case SIG_BLOCK: | |
2477 | sigaddsetmask(¤t->blocked, new_set); | |
2478 | break; | |
2479 | case SIG_UNBLOCK: | |
2480 | sigdelsetmask(¤t->blocked, new_set); | |
2481 | break; | |
2482 | case SIG_SETMASK: | |
2483 | current->blocked.sig[0] = new_set; | |
2484 | break; | |
2485 | } | |
2486 | ||
2487 | recalc_sigpending(); | |
2488 | spin_unlock_irq(¤t->sighand->siglock); | |
2489 | if (error) | |
2490 | goto out; | |
2491 | if (oset) | |
2492 | goto set_old; | |
2493 | } else if (oset) { | |
2494 | old_set = current->blocked.sig[0]; | |
2495 | set_old: | |
2496 | error = -EFAULT; | |
2497 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | |
2498 | goto out; | |
2499 | } | |
2500 | error = 0; | |
2501 | out: | |
2502 | return error; | |
2503 | } | |
2504 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | |
2505 | ||
2506 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | |
2507 | asmlinkage long | |
2508 | sys_rt_sigaction(int sig, | |
2509 | const struct sigaction __user *act, | |
2510 | struct sigaction __user *oact, | |
2511 | size_t sigsetsize) | |
2512 | { | |
2513 | struct k_sigaction new_sa, old_sa; | |
2514 | int ret = -EINVAL; | |
2515 | ||
2516 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2517 | if (sigsetsize != sizeof(sigset_t)) | |
2518 | goto out; | |
2519 | ||
2520 | if (act) { | |
2521 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) | |
2522 | return -EFAULT; | |
2523 | } | |
2524 | ||
2525 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | |
2526 | ||
2527 | if (!ret && oact) { | |
2528 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) | |
2529 | return -EFAULT; | |
2530 | } | |
2531 | out: | |
2532 | return ret; | |
2533 | } | |
2534 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ | |
2535 | ||
2536 | #ifdef __ARCH_WANT_SYS_SGETMASK | |
2537 | ||
2538 | /* | |
2539 | * For backwards compatibility. Functionality superseded by sigprocmask. | |
2540 | */ | |
2541 | asmlinkage long | |
2542 | sys_sgetmask(void) | |
2543 | { | |
2544 | /* SMP safe */ | |
2545 | return current->blocked.sig[0]; | |
2546 | } | |
2547 | ||
2548 | asmlinkage long | |
2549 | sys_ssetmask(int newmask) | |
2550 | { | |
2551 | int old; | |
2552 | ||
2553 | spin_lock_irq(¤t->sighand->siglock); | |
2554 | old = current->blocked.sig[0]; | |
2555 | ||
2556 | siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| | |
2557 | sigmask(SIGSTOP))); | |
2558 | recalc_sigpending(); | |
2559 | spin_unlock_irq(¤t->sighand->siglock); | |
2560 | ||
2561 | return old; | |
2562 | } | |
2563 | #endif /* __ARCH_WANT_SYS_SGETMASK */ | |
2564 | ||
2565 | #ifdef __ARCH_WANT_SYS_SIGNAL | |
2566 | /* | |
2567 | * For backwards compatibility. Functionality superseded by sigaction. | |
2568 | */ | |
2569 | asmlinkage unsigned long | |
2570 | sys_signal(int sig, __sighandler_t handler) | |
2571 | { | |
2572 | struct k_sigaction new_sa, old_sa; | |
2573 | int ret; | |
2574 | ||
2575 | new_sa.sa.sa_handler = handler; | |
2576 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | |
c70d3d70 | 2577 | sigemptyset(&new_sa.sa.sa_mask); |
1da177e4 LT |
2578 | |
2579 | ret = do_sigaction(sig, &new_sa, &old_sa); | |
2580 | ||
2581 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | |
2582 | } | |
2583 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | |
2584 | ||
2585 | #ifdef __ARCH_WANT_SYS_PAUSE | |
2586 | ||
2587 | asmlinkage long | |
2588 | sys_pause(void) | |
2589 | { | |
2590 | current->state = TASK_INTERRUPTIBLE; | |
2591 | schedule(); | |
2592 | return -ERESTARTNOHAND; | |
2593 | } | |
2594 | ||
2595 | #endif | |
2596 | ||
150256d8 DW |
2597 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
2598 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) | |
2599 | { | |
2600 | sigset_t newset; | |
2601 | ||
2602 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2603 | if (sigsetsize != sizeof(sigset_t)) | |
2604 | return -EINVAL; | |
2605 | ||
2606 | if (copy_from_user(&newset, unewset, sizeof(newset))) | |
2607 | return -EFAULT; | |
2608 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2609 | ||
2610 | spin_lock_irq(¤t->sighand->siglock); | |
2611 | current->saved_sigmask = current->blocked; | |
2612 | current->blocked = newset; | |
2613 | recalc_sigpending(); | |
2614 | spin_unlock_irq(¤t->sighand->siglock); | |
2615 | ||
2616 | current->state = TASK_INTERRUPTIBLE; | |
2617 | schedule(); | |
2618 | set_thread_flag(TIF_RESTORE_SIGMASK); | |
2619 | return -ERESTARTNOHAND; | |
2620 | } | |
2621 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ | |
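/*
 * Example (user-space sketch, not part of this file): the race-free
 * wait that rt_sigsuspend exists for. The signal stays blocked while
 * the flag is tested; sigsuspend() atomically opens the mask and
 * sleeps, and the saved_sigmask logic above restores the old mask.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, old;

	signal(SIGUSR1, handler);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */

	raise(SIGUSR1);			/* may arrive "early": stays pending */
	while (!got_usr1)
		sigsuspend(&old);	/* atomically unblock and sleep */

	printf("SIGUSR1 handled without losing the wakeup\n");
	return 0;
}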
2622 | ||
f269fdd1 DH |
2623 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
2624 | { | |
2625 | return NULL; | |
2626 | } | |
2627 | ||
1da177e4 LT |
2628 | void __init signals_init(void) |
2629 | { | |
2630 | sigqueue_cachep = | |
2631 | kmem_cache_create("sigqueue", | |
2632 | sizeof(struct sigqueue), | |
2633 | __alignof__(struct sigqueue), | |
2634 | SLAB_PANIC, NULL, NULL); | |
2635 | } |