/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig,
			    int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
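
/*
 * For example, PENDING(&t->pending, &t->blocked) expands to
 * has_pending_signals(&t->pending.signal, &t->blocked), i.e. "does this
 * task have a private pending signal that is not currently blocked?".
 */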
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->group_stop & GROUP_STOP_PENDING) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))
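
/*
 * Example: if both SIGSEGV (synchronous, fault-generated) and SIGUSR1 are
 * pending in the first word, next_signal() below restricts the candidate
 * set to SYNCHRONOUS_MASK, so SIGSEGV is dequeued before the asynchronous
 * SIGUSR1.
 */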
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
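
/*
 * Sample output of the above (the comm/pid/signal values here are
 * illustrative only):
 *
 *	myapp/1234: reached RLIMIT_SIGPENDING, dropped signal 42
 */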
/**
 * task_clear_group_stop_trapping - clear group stop trapping bit
 * @task: task in question
 *
 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
 * and wake up the ptracer.  Note that we don't need any further locking.
 * @task->siglock guarantees that @task->parent points to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void task_clear_group_stop_trapping(struct task_struct *task)
{
	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
		task->group_stop &= ~GROUP_STOP_TRAPPING;
		__wake_up_sync(&task->parent->signal->wait_chldexit,
			       TASK_UNINTERRUPTIBLE, 1);
	}
}

/**
 * task_clear_group_stop_pending - clear pending group stop
 * @task: task in question
 *
 * Clear group stop states for @task.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_group_stop_pending(struct task_struct *task)
{
	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
			      GROUP_STOP_DEQUEUED);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent,
 * %false otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->group_stop & GROUP_STOP_CONSUME;

	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));

	task_clear_group_stop_pending(task);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
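
#if 0
/*
 * Illustrative sketch (not part of this file) of how a driver might use
 * the notifier interface above; example_notifier and
 * example_critical_section are hypothetical names made up for the example.
 */
static int example_notifier(void *priv)
{
	/* return 0 to block the signal, nonzero to let it be acted upon */
	return *(int *)priv;
}

static void example_critical_section(sigset_t *mask, int *allow)
{
	block_all_signals(example_notifier, allow, mask);
	/* ... work that must not be disturbed by signals in *mask ... */
	unblock_all_signals();
}
#endif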
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->group_stop |= GROUP_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
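
/*
 * Note: the comparison above relies on SEND_SIG_NOINFO, SEND_SIG_PRIV and
 * SEND_SIG_FORCED being the ordered sentinel pointer values 0, 1 and 2,
 * so "info <= SEND_SIG_FORCED" matches exactly those three sentinels.
 */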
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
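	/*
	 * The euid/uid XOR chain below is a branch-free way of testing that
	 * the sender shares no user ID with the target: each XOR term is
	 * nonzero exactly when the two IDs differ, so the whole conjunction
	 * holds only if all four pairs differ.
	 */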
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_group_stop_pending(t);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			wake_up_state(t, __TASK_STOPPED);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_group_stop_pending(t);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
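
/*
 * Example of the legacy-queue rule: if SIGUSR1 (< SIGRTMIN) is already
 * pending, a second SIGUSR1 is silently dropped by __send_signal() below,
 * while SIGRTMIN + 1 is queued anew on every send.
 */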
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			 int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_group_stop_pending(t);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
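
#if 0
/*
 * Illustrative sketch (not part of this file): delivering job-control
 * style signals with the helpers above. pgrp and pid are hypothetical
 * struct pid pointers held by the caller.
 */
static void example_hangup(struct pid *pgrp, struct pid *pid)
{
	kill_pgrp(pgrp, SIGHUP, 1);	/* priv != 0: sent as SEND_SIG_PRIV */
	kill_pid(pid, SIGTERM, 0);	/* priv == 0: sent as SEND_SIG_NOINFO */
}
#endif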
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
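
#if 0
/*
 * Illustrative sketch (not part of this file) of the preallocated
 * sigqueue pattern used by the POSIX timer code: allocate at setup time
 * so that delivery later cannot fail with -EAGAIN.  The function name
 * and the si_* values are made up for the example.
 */
static int example_arm_rt_notification(struct task_struct *t)
{
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return -EAGAIN;	/* report allocation failure at setup time */
	q->info.si_signo = SIGRTMIN;
	q->info.si_errno = 0;
	q->info.si_code = SI_TIMER;
	/* later, on each expiry: */
	return send_sigqueue(q, t, 0);
}
#endif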
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}
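	/*
	 * Examples of the decoding above: exit_code 0x0b (killed by SIGSEGV)
	 * yields CLD_KILLED with si_status == 11; 0x8b (SIGSEGV plus the
	 * core-dump flag 0x80) yields CLD_DUMPED; 0x100 (exit(1)) yields
	 * CLD_EXITED with si_status == 1.
	 */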
	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * Test whether the target task of the usual cldstop notification - the
 * real_parent of @child - is in the same group as the ptracer.
 */
static bool real_parent_is_ptracer(struct task_struct *child)
{
	return same_thread_group(child->parent, child->real_parent);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * while siglock was released for the arch hook, PENDING could be
	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
	 * is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * TRACED should be visible before TRAPPING is cleared; otherwise,
	 * the tracer might fail do_wait().
	 */
	set_current_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
	 * transition to TASK_TRACED should be atomic with respect to
	 * siglock.  This should be done after the arch hook as siglock is
	 * released and regrabbed across it.
	 */
	task_clear_group_stop_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && !real_parent_is_ptracer(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
		 * in do_signal_stop() on return, so notifying the real
		 * parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
retry:
	if (!(current->group_stop & GROUP_STOP_PENDING)) {
		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->group_stop for retries */
		WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);

		if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such a case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;
		else
			WARN_ON_ONCE(!task_ptrace(current));

		current->group_stop &= ~GROUP_STOP_SIGMASK;
		current->group_stop |= signr | gstop;
		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			t->group_stop &= ~GROUP_STOP_SIGMASK;
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
				t->group_stop |= signr | gstop;
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
		}
	}

	if (likely(!task_ptrace(current))) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, a ptracer may attach in between; however, this is
		 * for group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		schedule();

		spin_lock_irq(&current->sighand->siglock);
	} else {
		ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
			    CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}

	/*
	 * GROUP_STOP_PENDING could be set if another group stop has
	 * started since being woken up or ptrace wants us to transit
	 * between TASK_STOPPED and TRACED.  Retry group stop.
	 */
	if (current->group_stop & GROUP_STOP_PENDING) {
		WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
		goto retry;
	}

	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
	task_clear_group_stop_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	tracehook_finish_jctl();
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO. */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		struct task_struct *leader;
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);

		do_notify_parent_cldstop(current, false, why);

		leader = current->group_leader;
		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
			do_notify_parent_cldstop(leader, true, why);

		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			if (unlikely(current->group_stop &
				     GROUP_STOP_PENDING) && do_signal_stop(0))
				goto relock;

			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 *
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and STOP.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}

	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
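
#if 0
/*
 * Illustrative sketch (not part of this file): block SIGINT around a
 * critical region from kernel code, then restore the previous mask.
 */
static void example_block_sigint(void)
{
	sigset_t set, old;

	siginitset(&set, sigmask(SIGINT));
	sigprocmask(SIG_BLOCK, &set, &old);
	/* ... SIGINT stays pending here rather than being delivered ... */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif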
2297 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2298 sigset_t __user *, oset, size_t, sigsetsize)
2300 int error = -EINVAL;
2301 sigset_t old_set, new_set;
2303 /* XXX: Don't preclude handling different sized sigset_t's. */
2304 if (sigsetsize != sizeof(sigset_t))
2309 if (copy_from_user(&new_set, set, sizeof(*set)))
2311 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2313 error = sigprocmask(how, &new_set, &old_set);
2319 spin_lock_irq(¤t->sighand->siglock);
2320 old_set = current->blocked;
2321 spin_unlock_irq(¤t->sighand->siglock);
2325 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2333 long do_sigpending(void __user *set, unsigned long sigsetsize)
2335 long error = -EINVAL;
2338 if (sigsetsize > sizeof(sigset_t))
2341 spin_lock_irq(&current->sighand->siglock);
2342 sigorsets(&pending, &current->pending.signal,
2343 &current->signal->shared_pending.signal);
2344 spin_unlock_irq(&current->sighand->siglock);
2346 /* Outside the lock because only this thread touches it. */
2347 sigandsets(&pending, &current->blocked, &pending);
2350 if (!copy_to_user(set, &pending, sigsetsize))
2351 error = 0;
2357 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2359 return do_sigpending(set, sigsetsize);
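/*
 * User-space sketch (illustrative only): glibc's sigpending(3) lands
 * here. A blocked-then-raised signal shows up in the queried set:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGUSR1);
 *		sigpending(&pending);
 *		printf("pending: %d\n", sigismember(&pending, SIGUSR1));
 *		return 0;
 *	}
 */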
2362 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2364 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2366 int err;
2368 if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2369 return -EFAULT;
2370 if (from->si_code < 0)
2371 return __copy_to_user(to, from, sizeof(siginfo_t))
2372 ? -EFAULT : 0;
2374 * If you change siginfo_t structure, please be sure
2375 * this code is fixed accordingly.
2376 * Please remember to update the signalfd_copyinfo() function
2377 * inside fs/signalfd.c too, in case siginfo_t changes.
2378 * It should never copy any pad contained in the structure
2379 * to avoid security leaks, but must copy the generic
2380 * 3 ints plus the relevant union member.
2382 err = __put_user(from->si_signo, &to->si_signo);
2383 err |= __put_user(from->si_errno, &to->si_errno);
2384 err |= __put_user((short)from->si_code, &to->si_code);
2385 switch (from->si_code & __SI_MASK) {
2386 case __SI_KILL:
2387 err |= __put_user(from->si_pid, &to->si_pid);
2388 err |= __put_user(from->si_uid, &to->si_uid);
2389 break;
2390 case __SI_TIMER:
2391 err |= __put_user(from->si_tid, &to->si_tid);
2392 err |= __put_user(from->si_overrun, &to->si_overrun);
2393 err |= __put_user(from->si_ptr, &to->si_ptr);
2394 break;
2395 case __SI_POLL:
2396 err |= __put_user(from->si_band, &to->si_band);
2397 err |= __put_user(from->si_fd, &to->si_fd);
2398 break;
2399 case __SI_FAULT:
2400 err |= __put_user(from->si_addr, &to->si_addr);
2401 #ifdef __ARCH_SI_TRAPNO
2402 err |= __put_user(from->si_trapno, &to->si_trapno);
2403 #endif
2404 #ifdef BUS_MCEERR_AO
2405 /*
2406 * Other callers might not initialize the si_lsb field,
2407 * so check explicitly for the right codes here.
2408 */
2409 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2410 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2411 #endif
2412 break;
2413 case __SI_CHLD:
2414 err |= __put_user(from->si_pid, &to->si_pid);
2415 err |= __put_user(from->si_uid, &to->si_uid);
2416 err |= __put_user(from->si_status, &to->si_status);
2417 err |= __put_user(from->si_utime, &to->si_utime);
2418 err |= __put_user(from->si_stime, &to->si_stime);
2419 break;
2420 case __SI_RT: /* This is not generated by the kernel as of now. */
2421 case __SI_MESGQ: /* But this is */
2422 err |= __put_user(from->si_pid, &to->si_pid);
2423 err |= __put_user(from->si_uid, &to->si_uid);
2424 err |= __put_user(from->si_ptr, &to->si_ptr);
2425 break;
2426 default: /* this is just in case for now ... */
2427 err |= __put_user(from->si_pid, &to->si_pid);
2428 err |= __put_user(from->si_uid, &to->si_uid);
2429 break;
2430 }
2432 return err;
2433 }
2435 #endif
2436 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2437 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2438 size_t, sigsetsize)
2440 int ret, sig;
2441 sigset_t these;
2442 struct timespec ts;
2443 siginfo_t info;
2444 long timeout = 0;
2446 /* XXX: Don't preclude handling different sized sigset_t's. */
2447 if (sigsetsize != sizeof(sigset_t))
2448 return -EINVAL;
2450 if (copy_from_user(&these, uthese, sizeof(these)))
2451 return -EFAULT;
2454 * Invert the set of allowed signals to get those we
2455 * want to block.
2456 */
2457 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2458 signotset(&these);
2461 if (copy_from_user(&ts, uts, sizeof(ts)))
2462 return -EFAULT;
2463 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2464 || ts.tv_sec < 0)
2465 return -EINVAL;
2468 spin_lock_irq(&current->sighand->siglock);
2469 sig = dequeue_signal(current, &these, &info);
2471 timeout = MAX_SCHEDULE_TIMEOUT;
2473 timeout = (timespec_to_jiffies(&ts)
2474 + (ts.tv_sec || ts.tv_nsec));
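/*
 * Note: (ts.tv_sec || ts.tv_nsec) evaluates to 1 for any nonzero
 * timeout, so one extra jiffy is added on top of the converted value;
 * presumably this is to make sure we sleep at least as long as asked,
 * rather than have jiffy granularity round a short timeout down to a
 * busy poll.
 */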
2477 /* None ready -- temporarily unblock those we're
2478 * interested in while we are sleeping, so that we'll
2479 * be awakened when they arrive. */
2480 current->real_blocked = current->blocked;
2481 sigandsets(&current->blocked, &current->blocked, &these);
2482 recalc_sigpending();
2483 spin_unlock_irq(&current->sighand->siglock);
2485 timeout = schedule_timeout_interruptible(timeout);
2487 spin_lock_irq(&current->sighand->siglock);
2488 sig = dequeue_signal(current, &these, &info);
2489 current->blocked = current->real_blocked;
2490 siginitset(&current->real_blocked, 0);
2491 recalc_sigpending();
2494 spin_unlock_irq(&current->sighand->siglock);
2499 if (copy_siginfo_to_user(uinfo, &info))
2500 ret = -EFAULT;
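/*
 * User-space sketch (illustrative only) of the synchronous style of
 * signal handling this syscall enables, via sigtimedwait(2):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &info, &ts) == SIGINT)
 *			printf("SIGINT from pid %d\n", (int)info.si_pid);
 *		return 0;
 *	}
 */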
2511 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2513 struct siginfo info;
2515 info.si_signo = sig;
2517 info.si_code = SI_USER;
2518 info.si_pid = task_tgid_vnr(current);
2519 info.si_uid = current_uid();
2521 return kill_something_info(sig, &info, pid);
2524 static int
2525 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2527 struct task_struct *p;
2528 int error = -ESRCH;
2530 rcu_read_lock();
2531 p = find_task_by_vpid(pid);
2532 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2533 error = check_kill_permission(sig, info, p);
2535 * The null signal is a permissions and process existence
2536 * probe. No signal is actually delivered.
2538 if (!error && sig) {
2539 error = do_send_sig_info(sig, info, p, false);
2541 * If lock_task_sighand() failed we pretend the task
2542 * dies after receiving the signal. The window is tiny,
2543 * and the signal is private anyway.
2545 if (unlikely(error == -ESRCH))
2546 error = 0;
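/*
 * The null-signal probe described above is the basis of the common
 * user-space liveness check; a minimal sketch (illustrative only,
 * process_exists() is a hypothetical helper):
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	static int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */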
2554 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2556 struct siginfo info;
2558 info.si_signo = sig;
2560 info.si_code = SI_TKILL;
2561 info.si_pid = task_tgid_vnr(current);
2562 info.si_uid = current_uid();
2564 return do_send_specific(tgid, pid, sig, &info);
2568 * sys_tgkill - send signal to one specific thread
2569 * @tgid: the thread group ID of the thread
2570 * @pid: the PID of the thread
2571 * @sig: signal to be sent
2573 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2574 * exists but no longer belongs to the target process. This method
2575 * solves the problem of threads exiting and PIDs getting reused.
2577 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2579 /* This is only valid for single tasks */
2580 if (pid <= 0 || tgid <= 0)
2581 return -EINVAL;
2583 return do_tkill(tgid, pid, sig);
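/*
 * User-space sketch (illustrative only): many libcs provide no tgkill()
 * wrapper, so callers typically go through syscall(2); the helper name
 * here is hypothetical.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int kill_one_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */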
2587 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2589 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2591 /* This is only valid for single tasks */
2592 if (pid <= 0)
2593 return -EINVAL;
2595 return do_tkill(0, pid, sig);
2598 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2599 siginfo_t __user *, uinfo)
2601 siginfo_t info;
2603 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2604 return -EFAULT;
2606 /* Not even root can pretend to send signals from the kernel.
2607 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2609 if (info.si_code != SI_QUEUE) {
2610 /* We used to allow any < 0 si_code */
2611 WARN_ON_ONCE(info.si_code < 0);
2612 return -EPERM;
2613 }
2614 info.si_signo = sig;
2616 /* POSIX.1b doesn't mention process groups. */
2617 return kill_proc_info(sig, &info, pid);
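/*
 * User-space sketch (illustrative only): sigqueue(3) is the usual path
 * into this syscall, queueing a signal with si_code == SI_QUEUE and a
 * payload in si_value. target_pid stands for any pid_t the caller
 * already knows.
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR1, val);
 */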
2620 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2622 /* This is only valid for single tasks */
2623 if (pid <= 0 || tgid <= 0)
2624 return -EINVAL;
2626 /* Not even root can pretend to send signals from the kernel.
2627 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2629 if (info->si_code != SI_QUEUE) {
2630 /* We used to allow any < 0 si_code */
2631 WARN_ON_ONCE(info->si_code < 0);
2632 return -EPERM;
2633 }
2634 info->si_signo = sig;
2636 return do_send_specific(tgid, pid, sig, info);
2639 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2640 siginfo_t __user *, uinfo)
2642 siginfo_t info;
2644 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2645 return -EFAULT;
2647 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2650 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2652 struct task_struct *t = current;
2653 struct k_sigaction *k;
2654 sigset_t mask;
2656 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2657 return -EINVAL;
2659 k = &t->sighand->action[sig-1];
2661 spin_lock_irq(&current->sighand->siglock);
2666 sigdelsetmask(&act->sa.sa_mask,
2667 sigmask(SIGKILL) | sigmask(SIGSTOP));
2671 * "Setting a signal action to SIG_IGN for a signal that is
2672 * pending shall cause the pending signal to be discarded,
2673 * whether or not it is blocked."
2675 * "Setting a signal action to SIG_DFL for a signal that is
2676 * pending and whose default action is to ignore the signal
2677 * (for example, SIGCHLD), shall cause the pending signal to
2678 * be discarded, whether or not it is blocked"
2680 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2681 sigemptyset(&mask);
2682 sigaddset(&mask, sig);
2683 rm_from_queue_full(&mask, &t->signal->shared_pending);
2684 do {
2685 rm_from_queue_full(&mask, &t->pending);
2686 t = next_thread(t);
2687 } while (t != current);
2691 spin_unlock_irq(&current->sighand->siglock);
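/*
 * The POSIX discard rule quoted above is observable from user space;
 * a minimal sketch (illustrative only):
 *
 *	#include <signal.h>
 *
 *	sigset_t set, pending;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);
 *	signal(SIGUSR1, SIG_IGN);
 *	sigpending(&pending);
 *
 * After the signal(SIGUSR1, SIG_IGN) call the pending SIGUSR1 has been
 * discarded, so sigismember(&pending, SIGUSR1) reports 0 even though
 * it was blocked the whole time.
 */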
2696 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2698 stack_t oss;
2699 int error;
2701 oss.ss_sp = (void __user *) current->sas_ss_sp;
2702 oss.ss_size = current->sas_ss_size;
2703 oss.ss_flags = sas_ss_flags(sp);
2711 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2712 goto out;
2713 error = __get_user(ss_sp, &uss->ss_sp) |
2714 __get_user(ss_flags, &uss->ss_flags) |
2715 __get_user(ss_size, &uss->ss_size);
2720 if (on_sig_stack(sp))
2721 goto out;
2726 * Note: this code used to test ss_flags incorrectly;
2727 * old code may have been written using ss_flags == 0
2728 * to mean ss_flags == SS_ONSTACK (as this was the only
2729 * way that worked), so this fix preserves that older
2730 * mechanism.
2732 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2733 goto out;
2735 if (ss_flags == SS_DISABLE) {
2740 if (ss_size < MINSIGSTKSZ)
2741 goto out;
2744 current->sas_ss_sp = (unsigned long) ss_sp;
2745 current->sas_ss_size = ss_size;
2751 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2752 goto out;
2753 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2754 __put_user(oss.ss_size, &uoss->ss_size) |
2755 __put_user(oss.ss_flags, &uoss->ss_flags);
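/*
 * User-space sketch (illustrative only): the classic consumer of this
 * interface is a SIGSEGV handler that must run on its own stack, e.g.
 * to survive overflow of the main stack. some_handler is any ordinary
 * handler the caller defines.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *	struct sigaction sa = { .sa_flags = SA_ONSTACK };
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	sigaltstack(&ss, NULL);
 *	sa.sa_handler = some_handler;
 *	sigaction(SIGSEGV, &sa, NULL);
 */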
2762 #ifdef __ARCH_WANT_SYS_SIGPENDING
2764 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2766 return do_sigpending(set, sizeof(*set));
2771 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2772 /* Some platforms have their own version with special arguments;
2773 others support only sys_rt_sigprocmask. */
2775 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2776 old_sigset_t __user *, oset)
2779 old_sigset_t old_set, new_set;
2783 if (copy_from_user(&new_set, set, sizeof(*set)))
2784 return -EFAULT;
2785 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2787 spin_lock_irq(&current->sighand->siglock);
2788 old_set = current->blocked.sig[0];
2796 sigaddsetmask(&current->blocked, new_set);
2799 sigdelsetmask(&current->blocked, new_set);
2802 current->blocked.sig[0] = new_set;
2806 recalc_sigpending();
2807 spin_unlock_irq(&current->sighand->siglock);
2813 old_set = current->blocked.sig[0];
2816 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2817 goto out;
2823 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2825 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2826 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2827 const struct sigaction __user *, act,
2828 struct sigaction __user *, oact,
2831 struct k_sigaction new_sa, old_sa;
2832 int ret = -EINVAL;
2834 /* XXX: Don't preclude handling different sized sigset_t's. */
2835 if (sigsetsize != sizeof(sigset_t))
2836 goto out;
2839 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2840 return -EFAULT;
2843 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2846 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2847 return -EFAULT;
2852 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2854 #ifdef __ARCH_WANT_SYS_SGETMASK
2857 * For backwards compatibility. Functionality superseded by sigprocmask.
2859 SYSCALL_DEFINE0(sgetmask)
2862 return current->blocked.sig[0];
2865 SYSCALL_DEFINE1(ssetmask, int, newmask)
2867 int old;
2869 spin_lock_irq(&current->sighand->siglock);
2870 old = current->blocked.sig[0];
2872 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2873 sigmask(SIGSTOP)));
2874 recalc_sigpending();
2875 spin_unlock_irq(&current->sighand->siglock);
2879 #endif /* __ARCH_WANT_SYS_SGETMASK */
2881 #ifdef __ARCH_WANT_SYS_SIGNAL
2883 * For backwards compatibility. Functionality superseded by sigaction.
2885 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2887 struct k_sigaction new_sa, old_sa;
2888 int ret;
2890 new_sa.sa.sa_handler = handler;
2891 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2892 sigemptyset(&new_sa.sa.sa_mask);
2894 ret = do_sigaction(sig, &new_sa, &old_sa);
2896 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2898 #endif /* __ARCH_WANT_SYS_SIGNAL */
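/*
 * SA_ONESHOT | SA_NOMASK above give sys_signal() the historical
 * System V semantics: the handler is reset to SIG_DFL on delivery and
 * the signal is not blocked while its handler runs. User space that
 * wants reliable semantics uses sigaction() instead; a minimal sketch
 * (illustrative only, some_handler is hypothetical):
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { .sa_handler = some_handler };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGUSR1, &sa, NULL);
 */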
2900 #ifdef __ARCH_WANT_SYS_PAUSE
2902 SYSCALL_DEFINE0(pause)
2904 current->state = TASK_INTERRUPTIBLE;
2905 schedule();
2906 return -ERESTARTNOHAND;
2911 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2912 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2914 sigset_t newset;
2916 /* XXX: Don't preclude handling different sized sigset_t's. */
2917 if (sigsetsize != sizeof(sigset_t))
2918 return -EINVAL;
2920 if (copy_from_user(&newset, unewset, sizeof(newset)))
2921 return -EFAULT;
2922 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2924 spin_lock_irq(&current->sighand->siglock);
2925 current->saved_sigmask = current->blocked;
2926 current->blocked = newset;
2927 recalc_sigpending();
2928 spin_unlock_irq(&current->sighand->siglock);
2930 current->state = TASK_INTERRUPTIBLE;
2931 schedule();
2932 set_restore_sigmask();
2933 return -ERESTARTNOHAND;
2935 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
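/*
 * User-space sketch (illustrative only): the canonical race-free
 * wait-for-signal pattern built on this syscall, via sigsuspend(3).
 * "predicate" stands for whatever flag the signal handler sets.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	while (!predicate)
 *		sigsuspend(&waitmask);
 *
 * sigsuspend() atomically installs waitmask (which does not block
 * SIGUSR1) and sleeps, the same saved_sigmask dance performed above.
 */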
2937 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2939 return NULL;
2942 void __init signals_init(void)
2944 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2947 #ifdef CONFIG_KGDB_KDB
2948 #include <linux/kdb.h>
2950 * kdb_send_sig_info - Allows kdb to send signals without exposing
2951 * signal internals. This function checks if the required locks are
2952 * available before calling the main signal code, to avoid kdb
2953 * deadlocks.
2955 void
2956 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2958 static struct task_struct *kdb_prev_t;
2959 int sig, new_t;
2960 if (!spin_trylock(&t->sighand->siglock)) {
2961 kdb_printf("Can't do kill command now.\n"
2962 "The sigmask lock is held somewhere else in "
2963 "kernel, try again later\n");
2966 spin_unlock(&t->sighand->siglock);
2967 new_t = kdb_prev_t != t;
2969 if (t->state != TASK_RUNNING && new_t) {
2970 kdb_printf("Process is not RUNNING, sending a signal from "
2971 "kdb risks deadlock\n"
2972 "on the run queue locks. "
2973 "The signal has _not_ been sent.\n"
2974 "Reissue the kill command if you want to risk "
2978 sig = info->si_signo;
2979 if (send_sig_info(sig, info, t))
2980 kdb_printf("Failed to deliver signal %d to process %d.\n",
2981 sig, t->pid);
2982 else
2983 kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
2985 #endif /* CONFIG_KGDB_KDB */