/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
				int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
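/*
 * Worked example (illustrative): suppose pending->signal.sig[0] has the
 * bits for SIGINT (2) and SIGTERM (15) set, and *mask blocks SIGINT.
 * Then x = s[0] &~ m[0] leaves only bit 14 (SIGTERM, since sigmask(sig)
 * is 1UL << (sig - 1)), ffz(~x) returns 14, and next_signal() reports
 * 14 + 1 = SIGTERM.  Lower-numbered signals always win within a word.
 */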
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * We won't get problems with the target's UID changing under us
	 * because changing it requires RCU be used, and if t != current, the
	 * caller must be holding the RCU readlock (by way of a spinlock) and
	 * we use RCU protection here
	 */
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
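/*
 * Usage sketch for the notifier interface above (illustrative only; the
 * names my_dev, my_notifier and dev->busy are assumptions, not kernel
 * API).  The notifier runs under ->siglock, so it must not sleep; per
 * the comment above block_all_signals(), returning 0 blocks the signal
 * and non-zero lets it be acted upon.  The mask must stay valid until
 * unblock_all_signals() is called.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		return dev->busy ? 0 : 1;
 *	}
 *
 *	static void my_critical_section(struct my_dev *dev)
 *	{
 *		sigset_t mask;
 *
 *		siginitset(&mask, sigmask(SIGINT) | sigmask(SIGTERM));
 *		block_all_signals(my_notifier, dev, &mask);
 *		do_device_io(dev);
 *		unblock_all_signals();
 *	}
 */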
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold at least the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred = current_cred(), *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	tcred = __task_cred(t);
	if ((cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 * - the caller must hold the RCU read lock at least
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = check_kill_permission(sig, info, p);

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __send_signal(sig, info, p, 1, 0);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
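/*
 * For reference, the pid encodings handled above mirror kill(2) as seen
 * from userspace (illustrative):
 *
 *	kill(1234, SIGTERM);	signal the process with pid 1234
 *	kill(0, SIGTERM);	signal the caller's process group
 *	kill(-5678, SIGTERM);	signal process group 5678
 *	kill(-1, SIGTERM);	signal every process we may signal,
 *				except init and ourselves
 */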
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
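/*
 * Sketch of the intended life cycle of a preallocated sigqueue, roughly
 * what the POSIX timer code does (illustrative; error handling and the
 * surrounding structures are assumed):
 *
 *	struct sigqueue *q = sigqueue_alloc();
 *
 *	if (!q)
 *		return -EAGAIN;			(report failure up front)
 *
 *	q->info.si_signo = sig;			(later, on each event)
 *	q->info.si_code = SI_TIMER;
 *	ret = send_sigqueue(q, task, group);	(1 here means "ignored")
 *
 *	sigqueue_free(q);			(on teardown)
 */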
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is to switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
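/*
 * The SIG_IGN/SA_NOCLDWAIT handling above is what makes the classic
 * userspace idiom below work: children are reaped automatically and a
 * blocked wait() returns -ECHILD (illustrative userspace C, not kernel
 * code):
 *
 *	signal(SIGCHLD, SIG_IGN);	children will not become zombies
 *	if (fork() == 0)
 *		_exit(0);		the parent never needs to wait()
 */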
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int notify;

	if (!sig->group_stop_count) {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
	}
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop, report
	 * to the parent.  When ptraced, every thread reports itself.
	 */
	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	/*
	 * tracehook_notify_jctl() can drop and reacquire siglock, so
	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
	 * or SIGKILL comes in between, ->group_stop_count == 0.
	 */
	if (sig->group_stop_count) {
		if (!--sig->group_stop_count)
			sig->flags = SIGNAL_STOP_STOPPED;
		current->exit_code = sig->group_exit_code;
		__set_current_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}

	/* Now we don't run again until woken by SIGCONT or SIGKILL */
	do {
		schedule();
	} while (try_to_freeze());

	tracehook_finish_jctl();
	current->exit_code = 0;

	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;

		why = tracehook_notify_jctl(why, CLD_CONTINUED);
		spin_unlock_irq(&sighand->siglock);

		if (why) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current->group_leader, why);
			read_unlock(&tasklist_lock);
		}
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
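/*
 * For example (sketch), a kernel thread that must not be disturbed for
 * a while could block everything, which - as noted above - even covers
 * SIGKILL, unlike the user-mode interface:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */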
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
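/*
 * From userspace this is reached via sigtimedwait(2).  A typical caller
 * blocks the signal first so it stays queued instead of being delivered
 * (illustrative snippet, glibc wrappers assumed):
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { 5, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("sender pid %d\n", (int)si.si_pid);
 */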
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
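/*
 * Userspace reaches these through the raw syscall interface; thread
 * libraries do roughly the following (illustrative) so that a recycled
 * tid cannot be signalled by mistake, as described above sys_tgkill():
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */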
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info.
	 */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info.
	 */
	if (info->si_code >= 0)
		return -EPERM;
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
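/*
 * The matching userspace setup, for reference (illustrative; the handler
 * name segv_handler is an assumption): install an alternate stack and
 * request that a handler run on it, which is the main consumer of the
 * SS_ONSTACK/SS_DISABLE logic above.
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */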
#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}