/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
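/*
 * Illustrative sketch (editor's example, not in the original source):
 * has_pending_signals() is per-word set difference, "pending & ~blocked".
 * With SIGTERM pending and only SIGINT blocked, the test is true because
 * SIGTERM survives the masking:
 */
#if 0
static void example_pending(void)
{
	sigset_t pending, blocked;

	siginitset(&pending, sigmask(SIGINT) | sigmask(SIGTERM));
	siginitset(&blocked, sigmask(SIGINT));
	/* has_pending_signals(&pending, &blocked) != 0 */
}
#endif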
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; there the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
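/*
 * Illustrative sketch (editor's example, not in the original source):
 * ffz(~x) yields the index of the lowest *set* bit in x, so for word 0
 * the signal number is that index plus one:
 */
#if 0
static void example_next_signal(void)
{
	unsigned long x = sigmask(SIGINT);	/* bit 1 set: 1UL << (2 - 1) */
	int sig = ffz(~x) + 1;			/* ffz(~x) == 1, so sig == 2 */
}
#endif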
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
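/*
 * Illustrative sketch (editor's example, not in the original source):
 * a driver-style notifier that vetoes delivery while a hypothetical
 * critical section is active.  The DRM layer was the classic user of
 * this interface.
 */
#if 0
static int example_notifier(void *priv)
{
	int *in_critical_section = priv;

	/* returning 0 blocks the signal; non-zero lets it through */
	return !*in_critical_section;
}

static void example_block(sigset_t *mask, int *flag)
{
	block_all_signals(example_notifier, flag, mask);
	/* ... critical section ... */
	unblock_all_signals();
}
#endif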
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Restarting the timer in the signal
		 * dequeue path also reduces the timer noise on heavily
		 * loaded !highres systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
		error = audit_signal_info(sig, t); /* Let audit system see the signal */
		if (error)
			return error;
		error = -EPERM;
		if (((sig != SIGCONT) ||
			(task_session_nr(current) != task_session_nr(t)))
		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
		    && !capable(CAP_KILL))
			return error;
	}

	return security_task_kill(t, info, sig, 0);
}
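/*
 * Illustrative sketch (editor's note, not in the original source): the
 * XOR chain above is an inequality test on each uid pair, since a ^ b
 * is non-zero exactly when a != b.  The permission check is therefore
 * equivalent to the more familiar:
 */
#if 0
	if (((sig != SIGCONT) ||
	     (task_session_nr(current) != task_session_nr(t)))
	    && current->euid != t->suid && current->euid != t->uid
	    && current->uid  != t->suid && current->uid  != t->uid
	    && !capable(CAP_KILL))
		return -EPERM;
#endif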
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		unsigned int state;

		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue *q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
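/*
 * Illustrative sketch (editor's example, not in the original source):
 * LEGACY_QUEUE() is how classic (non-real-time) signals coalesce.  If a
 * SIGTERM is already pending, sending it again is a no-op:
 */
#if 0
	if (LEGACY_QUEUE(&t->pending, SIGTERM))
		return 0;	/* second SIGTERM is silently dropped */
#endif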
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int fastcall __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	} else
		error = -ESRCH;

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
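/*
 * Illustrative sketch (editor's example, not in the original source):
 * how in-kernel callers typically use send_sig().  priv selects the
 * siginfo flavour: 0 sends SI_USER info stamped with current's pid/uid,
 * non-zero sends SI_KERNEL info with pid/uid of 0.
 */
#if 0
static void example_notify_task(struct task_struct *task)
{
	/* deliver SIGTERM to one specific thread, as the kernel */
	send_sig(SIGTERM, task, 1);
}
#endif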
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
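/*
 * Illustrative sketch (editor's example, not in the original source):
 * the classic user of kill_pgrp() is the tty layer, which sends job
 * control signals to the foreground process group, e.g. on ^C:
 */
#if 0
static void example_tty_intr(struct pid *fg_pgrp)
{
	kill_pgrp(fg_pgrp, SIGINT, 1);	/* priv: sent by the kernel */
}
#endif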
int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure at timer_create time.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
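/*
 * Illustrative sketch (editor's example, not in the original source):
 * the posix-timers pattern.  The sigqueue is allocated once, at timer
 * creation, so expiry can never fail for lack of memory:
 */
#if 0
static struct sigqueue *example_timer_setup(void)
{
	struct sigqueue *q = sigqueue_alloc();	/* at timer_create time */

	if (!q)
		return NULL;	/* report -EAGAIN to the application */
	return q;		/* later passed to send_sigqueue() on expiry */
}
#endif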
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is effectively dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(sig->group_exit_task))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	try_to_freeze();

	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, 0, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = task_pid_vnr(current->parent);
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (is_global_init(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
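/*
 * Illustrative sketch (editor's example, not in the original source):
 * a kernel thread blocking everything except SIGKILL before entering
 * its service loop.  Unlike the userspace syscall, this interface will
 * happily block even "unblockable" signals if asked.
 */
#if 0
static void example_kthread_block_signals(void)
{
	sigset_t blocked;

	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &blocked, NULL);
}
#endif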
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
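/*
 * Illustrative sketch (editor's example, not in the original source):
 * the userspace counterpart this syscall backs.  A SIGSEGV handler must
 * run on an alternate stack if the fault was a stack overflow:
 */
#if 0
	/* userspace C, not kernel code */
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	sigaltstack(&ss, NULL);		/* ends up in do_sigaltstack() */
#endif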
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
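/*
 * Illustrative sketch (editor's note, not in the original source):
 * SA_ONESHOT | SA_NOMASK above gives sys_signal() the classic SysV
 * semantics, roughly equivalent to this userspace sigaction() call:
 */
#if 0
	/* userspace C, not kernel code */
	struct sigaction sa = {
		.sa_handler = handler,
		.sa_flags = SA_RESETHAND | SA_NODEFER, /* same bits, modern names */
	};
	sigemptyset(&sa.sa_mask);
	sigaction(sig, &sa, NULL);
#endif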
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}