/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

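/*
 * Return the userspace handler currently installed for @sig in @t's
 * shared sighand.  Signal numbers are 1-based, hence the sig - 1 index.
 */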
static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

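/*
 * Like sig_handler_ignored(), but additionally treats the default
 * action as "ignored" for an unkillable (init-like) task, unless the
 * signal was sent from an ancestor pid namespace.
 */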
static int sig_task_ignored(struct task_struct *t, int sig,
                int from_ancestor_ns)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
                        handler == SIG_DFL && !from_ancestor_ns)
                return 1;

        return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        if (!sig_task_ignored(t, sig, from_ancestor_ns))
                return 0;

        /*
         * Tracers may want to know about even ignored signals.
         */
        return !tracehook_consider_ignored_signal(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers that know it is safe
         * to do so clear it themselves.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (unlikely(tracehook_force_sigpending()))
                set_thread_flag(TIF_SIGPENDING);
        else if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}

static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                                current->comm, current->pid, sig);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         */
        rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
        rcu_read_unlock();

        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}

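/*
 * Free a queue record, unless it was preallocated (posix-timer case);
 * preallocated entries are returned via sigqueue_free() instead.
 */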
static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

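/*
 * Empty a pending queue: clear the pending bitmask and free every
 * queued entry.  Callers serialize against signal delivery, normally
 * via the siglock.
 */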
void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        __flush_signals(t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

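/*
 * Drop only the SI_TIMER entries from @pending, keeping the pending
 * bits of any signals that also have non-timer entries queued.
 */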
static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

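/*
 * Set every handler to SIG_IGN and discard anything already pending;
 * typically used for tasks (e.g. kernel threads) that should never
 * see signals.
 */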
void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

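/*
 * Return nonzero if @sig would not be handled by userspace: global
 * init never handles signals, and SIG_IGN/SIG_DFL count as unhandled
 * unless a tracer is interested in the fatal signal anyway.
 */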
int unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
        if (is_global_init(tsk))
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
        return !tracehook_consider_fatal_signal(tsk, sig);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue.  This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space.  So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

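/*
 * Pick the next deliverable signal from @pending, honouring any
 * block_all_signals() notifier the task has registered, and collect
 * its siginfo.  Returns 0 if nothing should be dequeued.
 */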
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                collect_signal(sig, pending, info);
        }

        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimer signal?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavily loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

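/*
 * SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are small magic
 * pointer values rather than real siginfo, so an info pointer at or
 * below SEND_SIG_FORCED is "special" and must not be dereferenced.
 */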
static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        if (cred->user->user_ns == tcred->user->user_ns &&
            (cred->euid == tcred->suid ||
             cred->euid == tcred->uid ||
             cred->uid  == tcred->suid ||
             cred->uid  == tcred->uid))
                return 1;

        if (ns_capable(tcred->user->user_ns, CAP_KILL))
                return 1;

        return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
                } while_each_thread(p, t);

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }

        return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL ||
             !tracehook_consider_fatal_signal(t, sig))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

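/*
 * Non-realtime signals coalesce: if @sig is already pending, a second
 * instance is not queued.  Realtime signals (>= SIGRTMIN) always queue.
 */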
static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;

        trace_signal_generate(sig, info, t);

        assert_spin_locked(&t->sighand->siglock);

        if (!prepare_signal(sig, t, from_ancestor_ns))
                return 0;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        if (legacy_queue(pending, sig))
                return 0;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism.  It is implementation
         * defined whether kill() does so.  We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        q->info.si_uid = current_uid();
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort.  We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        trace_signal_overflow_fail(sig, group, info);
                        return -EAGAIN;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
                        trace_signal_lose_info(sig, group, info);
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group)
{
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
                current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
        printk("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        printk("%02x ", insn);
                }
        }
#endif
        printk("\n");
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option(&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        bool group)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, group);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}

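/*
 * Take @tsk's siglock, guarding against ->sighand being changed or
 * freed underneath us (exec's de_thread(), exit): re-check the pointer
 * after acquiring the lock and retry until it is stable.  Returns the
 * locked sighand, or NULL if the task has already released it.
 *
 * Typical usage (via the lock_task_sighand() wrapper) is roughly:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */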
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, true);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        rcu_read_lock();
retry:
        p = pid_task(pid, PIDTYPE_PID);
        if (p) {
                error = group_send_sig_info(sig, info, p);
                if (unlikely(error == -ESRCH))
                        /*
                         * The task was unhashed in between, try again.
                         * If it is dead, pid_task() will return NULL,
                         * if we race with de_thread() it will find the
                         * new leader.
                         */
                        goto retry;
        }
        rcu_read_unlock();

        return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                         uid_t uid, uid_t euid, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;
        const struct cred *pcred;
        unsigned long flags;

        if (!valid_signal(sig))
                return ret;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        pcred = __task_cred(p);
        if (si_fromuser(info) &&
            euid != pcred->suid && euid != pcred->uid &&
            uid  != pcred->suid && uid  != pcred->uid) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;

        if (sig) {
                if (lock_task_sighand(p, &flags)) {
                        ret = __send_signal(sig, info, p, 1, 0);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct * p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                                        !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create(), so if this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;

        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}

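/*
 * Queue a preallocated sigqueue entry to @t (per-thread or group-wide).
 * Returns 0 on success, 1 if the signal was ignored, and -1 if the
 * target is already dead.  A re-send of a still-queued SI_TIMER entry
 * just bumps the overrun count.
 *
 * The posix-timer life cycle is roughly:
 *
 *	q = sigqueue_alloc();			(at timer_create() time)
 *	send_sigqueue(q, tsk, group);		(on each timer expiry)
 *	sigqueue_free(q);			(at timer_delete() time)
 */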
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        if (!prepare_signal(sig, t, 0))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
out:
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
        int ret = sig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!task_ptrace(tsk) &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot exit and release its namespace.
         *
         * The only thing it can do is switch its nsproxy with sys_unshare,
         * but unsharing pid namespaces is not allowed, so we'll always
         * see the relevant namespace.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, this is not
         * correct to rely on.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        info.si_uid = __task_cred(tsk)->uid;
        rcu_read_unlock();

        info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
                                tsk->signal->utime));
        info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
                                tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!task_ptrace(tsk) && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                ret = tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = -1;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);

        return ret;
}

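/*
 * Send SIGCHLD (with CLD_STOPPED, CLD_CONTINUED or CLD_TRAPPED) to the
 * parent, unless it asked not to be told via SIG_IGN/SA_NOCLDSTOP, and
 * wake up its wait4() callers either way.
 */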
1539 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1540 {
1541         struct siginfo info;
1542         unsigned long flags;
1543         struct task_struct *parent;
1544         struct sighand_struct *sighand;
1545
1546         if (task_ptrace(tsk))
1547                 parent = tsk->parent;
1548         else {
1549                 tsk = tsk->group_leader;
1550                 parent = tsk->real_parent;
1551         }
1552
1553         info.si_signo = SIGCHLD;
1554         info.si_errno = 0;
1555         /*
1556          * see comment in do_notify_parent() about the following 3 lines
1557          */
1558         rcu_read_lock();
1559         info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1560         info.si_uid = __task_cred(tsk)->uid;
1561         rcu_read_unlock();
1562
1563         info.si_utime = cputime_to_clock_t(tsk->utime);
1564         info.si_stime = cputime_to_clock_t(tsk->stime);
1565
1566         info.si_code = why;
1567         switch (why) {
1568         case CLD_CONTINUED:
1569                 info.si_status = SIGCONT;
1570                 break;
1571         case CLD_STOPPED:
1572                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1573                 break;
1574         case CLD_TRAPPED:
1575                 info.si_status = tsk->exit_code & 0x7f;
1576                 break;
1577         default:
1578                 BUG();
1579         }
1580
1581         sighand = parent->sighand;
1582         spin_lock_irqsave(&sighand->siglock, flags);
1583         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1584             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1585                 __group_send_sig_info(SIGCHLD, &info, parent);
1586         /*
1587          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1588          */
1589         __wake_up_parent(tsk, parent);
1590         spin_unlock_irqrestore(&sighand->siglock, flags);
1591 }
1592
1593 static inline int may_ptrace_stop(void)
1594 {
1595         if (!likely(task_ptrace(current)))
1596                 return 0;
1597         /*
1598          * Are we in the middle of do_coredump?
1599          * If so, and our tracer is also part of the coredump, stopping
1600          * is both a deadlock and pointless because our tracer
1601          * is dead, so don't allow us to stop.
1602          * If SIGKILL was already sent before the caller unlocked
1603          * ->siglock we must see ->core_state != NULL. Otherwise it
1604          * is safe to enter schedule().
1605          */
1606         if (unlikely(current->mm->core_state) &&
1607             unlikely(current->mm == current->parent->mm))
1608                 return 0;
1609
1610         return 1;
1611 }
1612
1613 /*
1614  * Return nonzero if there is a SIGKILL that should be waking us up.
1615  * Called with the siglock held.
1616  */
1617 static int sigkill_pending(struct task_struct *tsk)
1618 {
1619         return  sigismember(&tsk->pending.signal, SIGKILL) ||
1620                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1621 }
1622
1623 /*
1624  * This must be called with current->sighand->siglock held.
1625  *
1626  * This should be the path for all ptrace stops.
1627  * We always set current->last_siginfo while stopped here.
1628  * That makes it a way to test a stopped process for
1629  * being ptrace-stopped vs being job-control-stopped.
1630  *
1631  * If we actually decide not to stop at all because the tracer
1632  * is gone, we keep current->exit_code unless clear_code.
1633  */
1634 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1635         __releases(&current->sighand->siglock)
1636         __acquires(&current->sighand->siglock)
1637 {
1638         if (arch_ptrace_stop_needed(exit_code, info)) {
1639                 /*
1640                  * The arch code has something special to do before a
1641                  * ptrace stop.  This is allowed to block, e.g. for faults
1642                  * on user stack pages.  We can't keep the siglock while
1643                  * calling arch_ptrace_stop, so we must release it now.
1644                  * To preserve proper semantics, we must do this before
1645                  * any signal bookkeeping like checking group_stop_count.
1646                  * Meanwhile, a SIGKILL could come in before we retake the
1647                  * siglock.  That must prevent us from sleeping in TASK_TRACED.
1648                  * So after regaining the lock, we must check for SIGKILL.
1649                  */
1650                 spin_unlock_irq(&current->sighand->siglock);
1651                 arch_ptrace_stop(exit_code, info);
1652                 spin_lock_irq(&current->sighand->siglock);
1653                 if (sigkill_pending(current))
1654                         return;
1655         }
1656
1657         /*
1658          * If there is a group stop in progress,
1659          * we must participate in the bookkeeping.
1660          */
1661         if (current->signal->group_stop_count > 0)
1662                 --current->signal->group_stop_count;
1663
1664         current->last_siginfo = info;
1665         current->exit_code = exit_code;
1666
1667         /* Let the debugger run.  */
1668         __set_current_state(TASK_TRACED);
1669         spin_unlock_irq(&current->sighand->siglock);
1670         read_lock(&tasklist_lock);
1671         if (may_ptrace_stop()) {
1672                 do_notify_parent_cldstop(current, CLD_TRAPPED);
1673                 /*
1674                  * Don't want to allow preemption here, because
1675                  * sys_ptrace() needs this task to be inactive.
1676                  *
1677                  * XXX: implement read_unlock_no_resched().
1678                  */
1679                 preempt_disable();
1680                 read_unlock(&tasklist_lock);
1681                 preempt_enable_no_resched();
1682                 schedule();
1683         } else {
1684                 /*
1685                  * By the time we got the lock, our tracer went away.
1686                  * Don't drop the lock yet, another tracer may come.
1687                  */
1688                 __set_current_state(TASK_RUNNING);
1689                 if (clear_code)
1690                         current->exit_code = 0;
1691                 read_unlock(&tasklist_lock);
1692         }
1693
1694         /*
1695          * While in TASK_TRACED, we were considered "frozen enough".
1696          * Now that we woke up, it's crucial if we're supposed to be
1697          * frozen that we freeze now before running anything substantial.
1698          */
1699         try_to_freeze();
1700
1701         /*
1702          * We are back.  Now reacquire the siglock before touching
1703          * last_siginfo, so that we are sure to have synchronized with
1704          * any signal-sending on another CPU that wants to examine it.
1705          */
1706         spin_lock_irq(&current->sighand->siglock);
1707         current->last_siginfo = NULL;
1708
1709         /*
1710          * Queued signals ignored us while we were stopped for tracing.
1711          * So check for any that we should take before resuming user mode.
1712          * This sets TIF_SIGPENDING, but never clears it.
1713          */
1714         recalc_sigpending_tsk(current);
1715 }
1716
1717 void ptrace_notify(int exit_code)
1718 {
1719         siginfo_t info;
1720
1721         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1722
1723         memset(&info, 0, sizeof info);
1724         info.si_signo = SIGTRAP;
1725         info.si_code = exit_code;
1726         info.si_pid = task_pid_vnr(current);
1727         info.si_uid = current_uid();
1728
1729         /* Let the debugger run.  */
1730         spin_lock_irq(&current->sighand->siglock);
1731         ptrace_stop(exit_code, 1, &info);
1732         spin_unlock_irq(&current->sighand->siglock);
1733 }
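
/*
 * Illustrative userspace sketch (not part of this file): the tracer's
 * view of a ptrace stop such as the one ptrace_notify() performs.  The
 * stop is reported through waitpid() and the signal via WSTOPSIG().
 * Guarded with #if 0; standard ptrace(2) usage, /bin/true is just a
 * convenient target.
 */
#if 0
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        int status;

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl("/bin/true", "true", (char *)NULL);
                _exit(127);
        }

        waitpid(child, &status, 0);     /* child stops with SIGTRAP at execve */
        if (WIFSTOPPED(status))
                printf("child stopped, sig=%d\n", WSTOPSIG(status));

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, &status, 0);     /* reap the real exit */
        return 0;
}
#endif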
1734
1735 /*
1736  * This performs the stopping for SIGSTOP and other stop signals.
1737  * We have to stop all threads in the thread group.
1738  * Returns nonzero if we've actually stopped and released the siglock.
1739  * Returns zero if we didn't stop and still hold the siglock.
1740  */
1741 static int do_signal_stop(int signr)
1742 {
1743         struct signal_struct *sig = current->signal;
1744         int notify;
1745
1746         if (!sig->group_stop_count) {
1747                 struct task_struct *t;
1748
1749                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1750                     unlikely(signal_group_exit(sig)))
1751                         return 0;
1752                 /*
1753                  * There is no group stop already in progress.
1754                  * We must initiate one now.
1755                  */
1756                 sig->group_exit_code = signr;
1757
1758                 sig->group_stop_count = 1;
1759                 for (t = next_thread(current); t != current; t = next_thread(t))
1760                         /*
1761                          * Setting state to TASK_STOPPED for a group
1762                          * stop is always done with the siglock held,
1763                          * so this check has no races.
1764                          */
1765                         if (!(t->flags & PF_EXITING) &&
1766                             !task_is_stopped_or_traced(t)) {
1767                                 sig->group_stop_count++;
1768                                 signal_wake_up(t, 0);
1769                         }
1770         }
1771         /*
1772          * If there are no other threads in the group, or if there is
1773          * a group stop in progress and we are the last to stop, report
1774          * to the parent.  When ptraced, every thread reports itself.
1775          */
1776         notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1777         notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1778         /*
1779          * tracehook_notify_jctl() can drop and reacquire siglock, so
1780          * we keep ->group_stop_count != 0 before the call. If SIGCONT
1781          * or SIGKILL comes in between ->group_stop_count == 0.
1782          */
1783         if (sig->group_stop_count) {
1784                 if (!--sig->group_stop_count)
1785                         sig->flags = SIGNAL_STOP_STOPPED;
1786                 current->exit_code = sig->group_exit_code;
1787                 __set_current_state(TASK_STOPPED);
1788         }
1789         spin_unlock_irq(&current->sighand->siglock);
1790
1791         if (notify) {
1792                 read_lock(&tasklist_lock);
1793                 do_notify_parent_cldstop(current, notify);
1794                 read_unlock(&tasklist_lock);
1795         }
1796
1797         /* Now we don't run again until woken by SIGCONT or SIGKILL */
1798         do {
1799                 schedule();
1800         } while (try_to_freeze());
1801
1802         tracehook_finish_jctl();
1803         current->exit_code = 0;
1804
1805         return 1;
1806 }
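
/*
 * Illustrative userspace sketch (not part of this file): the group stop
 * implemented above, observed from the parent via waitpid().  WUNTRACED
 * reports the CLD_STOPPED notification, WCONTINUED the CLD_CONTINUED
 * one.  Guarded with #if 0; only standard POSIX job-control calls.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        int status;

        if (child == 0) {
                for (;;)
                        pause();        /* just sit there */
        }

        kill(child, SIGSTOP);
        waitpid(child, &status, WUNTRACED);     /* CLD_STOPPED notification */
        if (WIFSTOPPED(status))
                printf("stopped by signal %d\n", WSTOPSIG(status));

        kill(child, SIGCONT);
        waitpid(child, &status, WCONTINUED);    /* CLD_CONTINUED notification */
        if (WIFCONTINUED(status))
                printf("continued\n");

        kill(child, SIGKILL);
        waitpid(child, &status, 0);
        return 0;
}
#endif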
1807
1808 static int ptrace_signal(int signr, siginfo_t *info,
1809                          struct pt_regs *regs, void *cookie)
1810 {
1811         if (!task_ptrace(current))
1812                 return signr;
1813
1814         ptrace_signal_deliver(regs, cookie);
1815
1816         /* Let the debugger run.  */
1817         ptrace_stop(signr, 0, info);
1818
1819         /* We're back.  Did the debugger cancel the sig?  */
1820         signr = current->exit_code;
1821         if (signr == 0)
1822                 return signr;
1823
1824         current->exit_code = 0;
1825
1826         /* Update the siginfo structure if the signal has
1827            changed.  If the debugger wanted something
1828            specific in the siginfo structure then it should
1829            have updated *info via PTRACE_SETSIGINFO.  */
1830         if (signr != info->si_signo) {
1831                 info->si_signo = signr;
1832                 info->si_errno = 0;
1833                 info->si_code = SI_USER;
1834                 info->si_pid = task_pid_vnr(current->parent);
1835                 info->si_uid = task_uid(current->parent);
1836         }
1837
1838         /* If the (new) signal is now blocked, requeue it.  */
1839         if (sigismember(&current->blocked, signr)) {
1840                 specific_send_sig_info(signr, info, current);
1841                 signr = 0;
1842         }
1843
1844         return signr;
1845 }
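
/*
 * Illustrative userspace sketch (not part of this file): a debugger
 * cancelling a signal, which is what the exit_code check above enables.
 * Resuming with a data argument of 0 makes ptrace_signal() see signr == 0
 * and swallow the signal.  Guarded with #if 0; standard ptrace(2) usage.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        int status;

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);                 /* sync with the tracer */
                raise(SIGUSR1);                 /* would normally kill us */
                _exit(0);
        }

        waitpid(child, &status, 0);             /* initial SIGSTOP */
        ptrace(PTRACE_CONT, child, NULL, NULL);

        waitpid(child, &status, 0);             /* signal-delivery stop */
        if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
                ptrace(PTRACE_CONT, child, NULL, NULL); /* data==0 cancels */

        waitpid(child, &status, 0);
        if (WIFEXITED(status))
                printf("child survived: SIGUSR1 was cancelled\n");
        return 0;
}
#endif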
1846
1847 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1848                           struct pt_regs *regs, void *cookie)
1849 {
1850         struct sighand_struct *sighand = current->sighand;
1851         struct signal_struct *signal = current->signal;
1852         int signr;
1853
1854 relock:
1855         /*
1856          * We'll jump back here any time we were stopped in TASK_STOPPED.
1857          * While in TASK_STOPPED, we were considered "frozen enough".
1858          * Now that we woke up, it's crucial if we're supposed to be
1859          * frozen that we freeze now before running anything substantial.
1860          */
1861         try_to_freeze();
1862
1863         spin_lock_irq(&sighand->siglock);
1864         /*
1865          * Every stopped thread goes here after wakeup. Check to see if
1866          * we should notify the parent, prepare_signal(SIGCONT) encodes
1867          * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1868          */
1869         if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1870                 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1871                                 ? CLD_CONTINUED : CLD_STOPPED;
1872                 signal->flags &= ~SIGNAL_CLD_MASK;
1873
1874                 why = tracehook_notify_jctl(why, CLD_CONTINUED);
1875                 spin_unlock_irq(&sighand->siglock);
1876
1877                 if (why) {
1878                         read_lock(&tasklist_lock);
1879                         do_notify_parent_cldstop(current->group_leader, why);
1880                         read_unlock(&tasklist_lock);
1881                 }
1882                 goto relock;
1883         }
1884
1885         for (;;) {
1886                 struct k_sigaction *ka;
1887                 /*
1888          * Tracing can induce an artificial signal and choose sigaction.
1889                  * The return value in @signr determines the default action,
1890                  * but @info->si_signo is the signal number we will report.
1891                  */
1892                 signr = tracehook_get_signal(current, regs, info, return_ka);
1893                 if (unlikely(signr < 0))
1894                         goto relock;
1895                 if (unlikely(signr != 0))
1896                         ka = return_ka;
1897                 else {
1898                         if (unlikely(signal->group_stop_count > 0) &&
1899                             do_signal_stop(0))
1900                                 goto relock;
1901
1902                         signr = dequeue_signal(current, &current->blocked,
1903                                                info);
1904
1905                         if (!signr)
1906                                 break; /* will return 0 */
1907
1908                         if (signr != SIGKILL) {
1909                                 signr = ptrace_signal(signr, info,
1910                                                       regs, cookie);
1911                                 if (!signr)
1912                                         continue;
1913                         }
1914
1915                         ka = &sighand->action[signr-1];
1916                 }
1917
1918                 /* Trace actually delivered signals. */
1919                 trace_signal_deliver(signr, info, ka);
1920
1921                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1922                         continue;
1923                 if (ka->sa.sa_handler != SIG_DFL) {
1924                         /* Run the handler.  */
1925                         *return_ka = *ka;
1926
1927                         if (ka->sa.sa_flags & SA_ONESHOT)
1928                                 ka->sa.sa_handler = SIG_DFL;
1929
1930                         break; /* will return non-zero "signr" value */
1931                 }
1932
1933                 /*
1934                  * Now we are doing the default action for this signal.
1935                  */
1936                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1937                         continue;
1938
1939                 /*
1940                  * Global init gets no signals it doesn't want.
1941                  * Container-init gets no signals it doesn't want from same
1942                  * container.
1943                  *
1944                  * Note that if global/container-init sees a sig_kernel_only()
1945                  * signal here, the signal must have been generated internally
1946                  * or must have come from an ancestor namespace. In either
1947                  * case, the signal cannot be dropped.
1948                  */
1949                 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1950                                 !sig_kernel_only(signr))
1951                         continue;
1952
1953                 if (sig_kernel_stop(signr)) {
1954                         /*
1955                          * The default action is to stop all threads in
1956                          * the thread group.  The job control signals
1957                          * do nothing in an orphaned pgrp, but SIGSTOP
1958                          * always works.  Note that siglock needs to be
1959                          * dropped during the call to is_orphaned_pgrp()
1960                          * because of lock ordering with tasklist_lock.
1961                          * This allows an intervening SIGCONT to be posted.
1962                          * We need to check for that and bail out if necessary.
1963                          */
1964                         if (signr != SIGSTOP) {
1965                                 spin_unlock_irq(&sighand->siglock);
1966
1967                                 /* signals can be posted during this window */
1968
1969                                 if (is_current_pgrp_orphaned())
1970                                         goto relock;
1971
1972                                 spin_lock_irq(&sighand->siglock);
1973                         }
1974
1975                         if (likely(do_signal_stop(info->si_signo))) {
1976                                 /* It released the siglock.  */
1977                                 goto relock;
1978                         }
1979
1980                         /*
1981                          * We didn't actually stop, due to a race
1982                          * with SIGCONT or something like that.
1983                          */
1984                         continue;
1985                 }
1986
1987                 spin_unlock_irq(&sighand->siglock);
1988
1989                 /*
1990                  * Anything else is fatal, maybe with a core dump.
1991                  */
1992                 current->flags |= PF_SIGNALED;
1993
1994                 if (sig_kernel_coredump(signr)) {
1995                         if (print_fatal_signals)
1996                                 print_fatal_signal(regs, info->si_signo);
1997                         /*
1998                          * If it was able to dump core, this kills all
1999                          * other threads in the group and synchronizes with
2000                          * their demise.  If we lost the race with another
2001                          * thread getting here, it set group_exit_code
2002                          * first and our do_group_exit call below will use
2003                          * that value and ignore the one we pass it.
2004                          */
2005                         do_coredump(info->si_signo, info->si_signo, regs);
2006                 }
2007
2008                 /*
2009                  * Death signals, no core dump.
2010                  */
2011                 do_group_exit(info->si_signo);
2012                 /* NOTREACHED */
2013         }
2014         spin_unlock_irq(&sighand->siglock);
2015         return signr;
2016 }
2017
2018 void exit_signals(struct task_struct *tsk)
2019 {
2020         int group_stop = 0;
2021         struct task_struct *t;
2022
2023         if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2024                 tsk->flags |= PF_EXITING;
2025                 return;
2026         }
2027
2028         spin_lock_irq(&tsk->sighand->siglock);
2029         /*
2030          * From now this task is not visible for group-wide signals,
2031          * see wants_signal(), do_signal_stop().
2032          */
2033         tsk->flags |= PF_EXITING;
2034         if (!signal_pending(tsk))
2035                 goto out;
2036
2037         /* It could be that __group_complete_signal() chose us to
2038          * notify about a group-wide signal. Another thread should be
2039          * woken now to take the signal since we will not.
2040          */
2041         for (t = tsk; (t = next_thread(t)) != tsk; )
2042                 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2043                         recalc_sigpending_and_wake(t);
2044
2045         if (unlikely(tsk->signal->group_stop_count) &&
2046                         !--tsk->signal->group_stop_count) {
2047                 tsk->signal->flags = SIGNAL_STOP_STOPPED;
2048                 group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2049         }
2050 out:
2051         spin_unlock_irq(&tsk->sighand->siglock);
2052
2053         if (unlikely(group_stop)) {
2054                 read_lock(&tasklist_lock);
2055                 do_notify_parent_cldstop(tsk, group_stop);
2056                 read_unlock(&tasklist_lock);
2057         }
2058 }
2059
2060 EXPORT_SYMBOL(recalc_sigpending);
2061 EXPORT_SYMBOL_GPL(dequeue_signal);
2062 EXPORT_SYMBOL(flush_signals);
2063 EXPORT_SYMBOL(force_sig);
2064 EXPORT_SYMBOL(send_sig);
2065 EXPORT_SYMBOL(send_sig_info);
2066 EXPORT_SYMBOL(sigprocmask);
2067 EXPORT_SYMBOL(block_all_signals);
2068 EXPORT_SYMBOL(unblock_all_signals);
2069
2070
2071 /*
2072  * System call entry points.
2073  */
2074
2075 SYSCALL_DEFINE0(restart_syscall)
2076 {
2077         struct restart_block *restart = &current_thread_info()->restart_block;
2078         return restart->fn(restart);
2079 }
2080
2081 long do_no_restart_syscall(struct restart_block *param)
2082 {
2083         return -EINTR;
2084 }
2085
2086 /*
2087  * We don't need to get the kernel lock - this is all local to this
2088  * particular thread. (And that's good, because this is _heavily_
2089  * used by various programs.)
2090  */
2091
2092 /*
2093  * This is also useful for kernel threads that want to temporarily
2094  * (or permanently) block certain signals.
2095  *
2096  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2097  * interface happily blocks "unblockable" signals like SIGKILL
2098  * and friends.
2099  */
2100 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2101 {
2102         int error;
2103
2104         spin_lock_irq(&current->sighand->siglock);
2105         if (oldset)
2106                 *oldset = current->blocked;
2107
2108         error = 0;
2109         switch (how) {
2110         case SIG_BLOCK:
2111                 sigorsets(&current->blocked, &current->blocked, set);
2112                 break;
2113         case SIG_UNBLOCK:
2114                 signandsets(&current->blocked, &current->blocked, set);
2115                 break;
2116         case SIG_SETMASK:
2117                 current->blocked = *set;
2118                 break;
2119         default:
2120                 error = -EINVAL;
2121         }
2122         recalc_sigpending();
2123         spin_unlock_irq(&current->sighand->siglock);
2124
2125         return error;
2126 }
2127
2128 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2129                 sigset_t __user *, oset, size_t, sigsetsize)
2130 {
2131         int error = -EINVAL;
2132         sigset_t old_set, new_set;
2133
2134         /* XXX: Don't preclude handling different sized sigset_t's.  */
2135         if (sigsetsize != sizeof(sigset_t))
2136                 goto out;
2137
2138         if (set) {
2139                 error = -EFAULT;
2140                 if (copy_from_user(&new_set, set, sizeof(*set)))
2141                         goto out;
2142                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2143
2144                 error = sigprocmask(how, &new_set, &old_set);
2145                 if (error)
2146                         goto out;
2147                 if (oset)
2148                         goto set_old;
2149         } else if (oset) {
2150                 spin_lock_irq(&current->sighand->siglock);
2151                 old_set = current->blocked;
2152                 spin_unlock_irq(&current->sighand->siglock);
2153
2154         set_old:
2155                 error = -EFAULT;
2156                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2157                         goto out;
2158         }
2159         error = 0;
2160 out:
2161         return error;
2162 }
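
/*
 * Illustrative userspace sketch (not part of this file): the
 * SIG_BLOCK/SIG_UNBLOCK semantics above.  A blocked signal stays
 * pending until the mask is cleared, at which point it is delivered.
 * Guarded with #if 0; only standard POSIX calls are used.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got;

static void on_usr1(int sig)
{
        got = sig;
}

int main(void)
{
        sigset_t set, pending;

        signal(SIGUSR1, on_usr1);
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* SIG_BLOCK ORs into ->blocked */

        raise(SIGUSR1);                         /* stays pending while blocked */
        sigpending(&pending);
        printf("pending: %d\n", sigismember(&pending, SIGUSR1));

        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* delivery happens here */
        printf("handler saw signal %d\n", (int)got);
        return 0;
}
#endif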
2163
2164 long do_sigpending(void __user *set, unsigned long sigsetsize)
2165 {
2166         long error = -EINVAL;
2167         sigset_t pending;
2168
2169         if (sigsetsize > sizeof(sigset_t))
2170                 goto out;
2171
2172         spin_lock_irq(&current->sighand->siglock);
2173         sigorsets(&pending, &current->pending.signal,
2174                   &current->signal->shared_pending.signal);
2175         spin_unlock_irq(&current->sighand->siglock);
2176
2177         /* Outside the lock because only this thread touches it.  */
2178         sigandsets(&pending, &current->blocked, &pending);
2179
2180         error = -EFAULT;
2181         if (!copy_to_user(set, &pending, sigsetsize))
2182                 error = 0;
2183
2184 out:
2185         return error;
2186 }
2187
2188 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2189 {
2190         return do_sigpending(set, sigsetsize);
2191 }
2192
2193 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2194
2195 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2196 {
2197         int err;
2198
2199         if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2200                 return -EFAULT;
2201         if (from->si_code < 0)
2202                 return __copy_to_user(to, from, sizeof(siginfo_t))
2203                         ? -EFAULT : 0;
2204         /*
2205          * If you change siginfo_t structure, please be sure
2206          * this code is fixed accordingly.
2207          * Please remember to update the signalfd_copyinfo() function
2208          * inside fs/signalfd.c too, in case siginfo_t changes.
2209          * It should never copy any pad contained in the structure
2210          * to avoid security leaks, but must copy the generic
2211          * 3 ints plus the relevant union member.
2212          */
2213         err = __put_user(from->si_signo, &to->si_signo);
2214         err |= __put_user(from->si_errno, &to->si_errno);
2215         err |= __put_user((short)from->si_code, &to->si_code);
2216         switch (from->si_code & __SI_MASK) {
2217         case __SI_KILL:
2218                 err |= __put_user(from->si_pid, &to->si_pid);
2219                 err |= __put_user(from->si_uid, &to->si_uid);
2220                 break;
2221         case __SI_TIMER:
2222                  err |= __put_user(from->si_tid, &to->si_tid);
2223                  err |= __put_user(from->si_overrun, &to->si_overrun);
2224                  err |= __put_user(from->si_ptr, &to->si_ptr);
2225                 break;
2226         case __SI_POLL:
2227                 err |= __put_user(from->si_band, &to->si_band);
2228                 err |= __put_user(from->si_fd, &to->si_fd);
2229                 break;
2230         case __SI_FAULT:
2231                 err |= __put_user(from->si_addr, &to->si_addr);
2232 #ifdef __ARCH_SI_TRAPNO
2233                 err |= __put_user(from->si_trapno, &to->si_trapno);
2234 #endif
2235 #ifdef BUS_MCEERR_AO
2236                 /* 
2237                  * Other callers might not initialize the si_lsb field,
2238                  * so check explicitly for the right codes here.
2239                  */
2240                 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2241                         err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2242 #endif
2243                 break;
2244         case __SI_CHLD:
2245                 err |= __put_user(from->si_pid, &to->si_pid);
2246                 err |= __put_user(from->si_uid, &to->si_uid);
2247                 err |= __put_user(from->si_status, &to->si_status);
2248                 err |= __put_user(from->si_utime, &to->si_utime);
2249                 err |= __put_user(from->si_stime, &to->si_stime);
2250                 break;
2251         case __SI_RT: /* This is not generated by the kernel as of now. */
2252         case __SI_MESGQ: /* But this is */
2253                 err |= __put_user(from->si_pid, &to->si_pid);
2254                 err |= __put_user(from->si_uid, &to->si_uid);
2255                 err |= __put_user(from->si_ptr, &to->si_ptr);
2256                 break;
2257         default: /* this is just in case for now ... */
2258                 err |= __put_user(from->si_pid, &to->si_pid);
2259                 err |= __put_user(from->si_uid, &to->si_uid);
2260                 break;
2261         }
2262         return err;
2263 }
2264
2265 #endif
2266
2267 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2268                 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2269                 size_t, sigsetsize)
2270 {
2271         int ret, sig;
2272         sigset_t these;
2273         struct timespec ts;
2274         siginfo_t info;
2275         long timeout = 0;
2276
2277         /* XXX: Don't preclude handling different sized sigset_t's.  */
2278         if (sigsetsize != sizeof(sigset_t))
2279                 return -EINVAL;
2280
2281         if (copy_from_user(&these, uthese, sizeof(these)))
2282                 return -EFAULT;
2283                 
2284         /*
2285          * Invert the set of allowed signals to get those we
2286          * want to block.
2287          */
2288         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2289         signotset(&these);
2290
2291         if (uts) {
2292                 if (copy_from_user(&ts, uts, sizeof(ts)))
2293                         return -EFAULT;
2294                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2295                     || ts.tv_sec < 0)
2296                         return -EINVAL;
2297         }
2298
2299         spin_lock_irq(&current->sighand->siglock);
2300         sig = dequeue_signal(current, &these, &info);
2301         if (!sig) {
2302                 timeout = MAX_SCHEDULE_TIMEOUT;
2303                 if (uts)
2304                         timeout = (timespec_to_jiffies(&ts)
2305                                    + (ts.tv_sec || ts.tv_nsec));
2306
2307                 if (timeout) {
2308                         /* None ready -- temporarily unblock those we're
2309                          * interested in while we are sleeping, so that we'll
2310                          * be awakened when they arrive.  */
2311                         current->real_blocked = current->blocked;
2312                         sigandsets(&current->blocked, &current->blocked, &these);
2313                         recalc_sigpending();
2314                         spin_unlock_irq(&current->sighand->siglock);
2315
2316                         timeout = schedule_timeout_interruptible(timeout);
2317
2318                         spin_lock_irq(&current->sighand->siglock);
2319                         sig = dequeue_signal(current, &these, &info);
2320                         current->blocked = current->real_blocked;
2321                         siginitset(&current->real_blocked, 0);
2322                         recalc_sigpending();
2323                 }
2324         }
2325         spin_unlock_irq(&current->sighand->siglock);
2326
2327         if (sig) {
2328                 ret = sig;
2329                 if (uinfo) {
2330                         if (copy_siginfo_to_user(uinfo, &info))
2331                                 ret = -EFAULT;
2332                 }
2333         } else {
2334                 ret = -EAGAIN;
2335                 if (timeout)
2336                         ret = -EINTR;
2337         }
2338
2339         return ret;
2340 }
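
/*
 * Illustrative userspace sketch (not part of this file): synchronous
 * signal handling through sigtimedwait(), which lands in the syscall
 * above.  The waited-for signals must be blocked first, exactly the
 * inversion the kernel performs on @uthese.  Guarded with #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* required before waiting */

        raise(SIGUSR1);                         /* already pending below */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig > 0)
                printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
        else
                perror("sigtimedwait");         /* EAGAIN on timeout */
        return 0;
}
#endif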
2341
2342 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2343 {
2344         struct siginfo info;
2345
2346         info.si_signo = sig;
2347         info.si_errno = 0;
2348         info.si_code = SI_USER;
2349         info.si_pid = task_tgid_vnr(current);
2350         info.si_uid = current_uid();
2351
2352         return kill_something_info(sig, &info, pid);
2353 }
2354
2355 static int
2356 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2357 {
2358         struct task_struct *p;
2359         int error = -ESRCH;
2360
2361         rcu_read_lock();
2362         p = find_task_by_vpid(pid);
2363         if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2364                 error = check_kill_permission(sig, info, p);
2365                 /*
2366                  * The null signal is a permissions and process existence
2367                  * probe.  No signal is actually delivered.
2368                  */
2369                 if (!error && sig) {
2370                         error = do_send_sig_info(sig, info, p, false);
2371                         /*
2372                          * If lock_task_sighand() failed we pretend the task
2373                          * dies after receiving the signal. The window is tiny,
2374                          * and the signal is private anyway.
2375                          */
2376                         if (unlikely(error == -ESRCH))
2377                                 error = 0;
2378                 }
2379         }
2380         rcu_read_unlock();
2381
2382         return error;
2383 }
2384
2385 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2386 {
2387         struct siginfo info;
2388
2389         info.si_signo = sig;
2390         info.si_errno = 0;
2391         info.si_code = SI_TKILL;
2392         info.si_pid = task_tgid_vnr(current);
2393         info.si_uid = current_uid();
2394
2395         return do_send_specific(tgid, pid, sig, &info);
2396 }
2397
2398 /**
2399  *  sys_tgkill - send signal to one specific thread
2400  *  @tgid: the thread group ID of the thread
2401  *  @pid: the PID of the thread
2402  *  @sig: signal to be sent
2403  *
2404  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2405  *  exists but no longer belongs to the target process. This
2406  *  method solves the problem of threads exiting and PIDs getting reused.
2407  */
2408 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2409 {
2410         /* This is only valid for single tasks */
2411         if (pid <= 0 || tgid <= 0)
2412                 return -EINVAL;
2413
2414         return do_tkill(tgid, pid, sig);
2415 }
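
/*
 * Illustrative userspace sketch (not part of this file): invoking tgkill
 * through syscall(2), since glibc exposes no wrapper here.  The tgid/tid
 * pair is what protects against PID reuse, as described above.  Guarded
 * with #if 0; SYS_tgkill and SYS_gettid are standard Linux syscall numbers.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        pid_t tgid = getpid();
        pid_t tid  = syscall(SYS_gettid);

        signal(SIGUSR1, SIG_IGN);       /* survive our own signal */
        if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) != 0)
                perror("tgkill");
        return 0;
}
#endif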
2416
2417 /*
2418  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2419  */
2420 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2421 {
2422         /* This is only valid for single tasks */
2423         if (pid <= 0)
2424                 return -EINVAL;
2425
2426         return do_tkill(0, pid, sig);
2427 }
2428
2429 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2430                 siginfo_t __user *, uinfo)
2431 {
2432         siginfo_t info;
2433
2434         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2435                 return -EFAULT;
2436
2437         /* Not even root can pretend to send signals from the kernel.
2438          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2439          */
2440         if (info.si_code != SI_QUEUE) {
2441                 /* We used to allow any < 0 si_code */
2442                 WARN_ON_ONCE(info.si_code < 0);
2443                 return -EPERM;
2444         }
2445         info.si_signo = sig;
2446
2447         /* POSIX.1b doesn't mention process groups.  */
2448         return kill_proc_info(sig, &info, pid);
2449 }
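
/*
 * Illustrative userspace sketch (not part of this file): queueing a
 * signal with a payload via sigqueue(3), which enters the kernel through
 * rt_sigqueueinfo with si_code == SI_QUEUE.  Guarded with #if 0; note
 * that printf() in a handler is not async-signal-safe and is used here
 * only to keep the sketch short.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_usr1(int sig, siginfo_t *info, void *ctx)
{
        (void)ctx;
        /* si_code is SI_QUEUE and si_value carries the payload */
        printf("sig %d, code %d, value %d\n",
               sig, info->si_code, info->si_value.sival_int);
}

int main(void)
{
        struct sigaction sa;
        union sigval v = { .sival_int = 42 };

        sa.sa_sigaction = on_usr1;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigqueue(getpid(), SIGUSR1, v); /* rt_sigqueueinfo under the hood */
        return 0;
}
#endif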
2450
2451 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2452 {
2453         /* This is only valid for single tasks */
2454         if (pid <= 0 || tgid <= 0)
2455                 return -EINVAL;
2456
2457         /* Not even root can pretend to send signals from the kernel.
2458          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2459          */
2460         if (info->si_code != SI_QUEUE) {
2461                 /* We used to allow any < 0 si_code */
2462                 WARN_ON_ONCE(info->si_code < 0);
2463                 return -EPERM;
2464         }
2465         info->si_signo = sig;
2466
2467         return do_send_specific(tgid, pid, sig, info);
2468 }
2469
2470 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2471                 siginfo_t __user *, uinfo)
2472 {
2473         siginfo_t info;
2474
2475         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2476                 return -EFAULT;
2477
2478         return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2479 }
2480
2481 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2482 {
2483         struct task_struct *t = current;
2484         struct k_sigaction *k;
2485         sigset_t mask;
2486
2487         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2488                 return -EINVAL;
2489
2490         k = &t->sighand->action[sig-1];
2491
2492         spin_lock_irq(&current->sighand->siglock);
2493         if (oact)
2494                 *oact = *k;
2495
2496         if (act) {
2497                 sigdelsetmask(&act->sa.sa_mask,
2498                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2499                 *k = *act;
2500                 /*
2501                  * POSIX 3.3.1.3:
2502                  *  "Setting a signal action to SIG_IGN for a signal that is
2503                  *   pending shall cause the pending signal to be discarded,
2504                  *   whether or not it is blocked."
2505                  *
2506                  *  "Setting a signal action to SIG_DFL for a signal that is
2507                  *   pending and whose default action is to ignore the signal
2508                  *   (for example, SIGCHLD), shall cause the pending signal to
2509                  *   be discarded, whether or not it is blocked"
2510                  */
2511                 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2512                         sigemptyset(&mask);
2513                         sigaddset(&mask, sig);
2514                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2515                         do {
2516                                 rm_from_queue_full(&mask, &t->pending);
2517                                 t = next_thread(t);
2518                         } while (t != current);
2519                 }
2520         }
2521
2522         spin_unlock_irq(&current->sighand->siglock);
2523         return 0;
2524 }
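
/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * 3.3.1.3 rule quoted above.  A signal that is pending and blocked is
 * discarded the moment its action becomes SIG_IGN.  Guarded with #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);
        raise(SIGUSR1);                 /* pending and blocked */

        signal(SIGUSR1, SIG_IGN);       /* discards the pending instance */

        sigpending(&pending);
        printf("still pending: %d\n", sigismember(&pending, SIGUSR1)); /* 0 */

        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* nothing is delivered */
        return 0;
}
#endif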
2525
2526 int
2527 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2528 {
2529         stack_t oss;
2530         int error;
2531
2532         oss.ss_sp = (void __user *) current->sas_ss_sp;
2533         oss.ss_size = current->sas_ss_size;
2534         oss.ss_flags = sas_ss_flags(sp);
2535
2536         if (uss) {
2537                 void __user *ss_sp;
2538                 size_t ss_size;
2539                 int ss_flags;
2540
2541                 error = -EFAULT;
2542                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2543                         goto out;
2544                 error = __get_user(ss_sp, &uss->ss_sp) |
2545                         __get_user(ss_flags, &uss->ss_flags) |
2546                         __get_user(ss_size, &uss->ss_size);
2547                 if (error)
2548                         goto out;
2549
2550                 error = -EPERM;
2551                 if (on_sig_stack(sp))
2552                         goto out;
2553
2554                 error = -EINVAL;
2555                 /*
2556                  * Note - this code used to test ss_flags incorrectly:
2557                  *        old code may have been written using ss_flags==0
2558                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2559                  *        way that worked), and this fix preserves that older
2560                  *        mechanism.
2562                  */
2563                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2564                         goto out;
2565
2566                 if (ss_flags == SS_DISABLE) {
2567                         ss_size = 0;
2568                         ss_sp = NULL;
2569                 } else {
2570                         error = -ENOMEM;
2571                         if (ss_size < MINSIGSTKSZ)
2572                                 goto out;
2573                 }
2574
2575                 current->sas_ss_sp = (unsigned long) ss_sp;
2576                 current->sas_ss_size = ss_size;
2577         }
2578
2579         error = 0;
2580         if (uoss) {
2581                 error = -EFAULT;
2582                 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2583                         goto out;
2584                 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2585                         __put_user(oss.ss_size, &uoss->ss_size) |
2586                         __put_user(oss.ss_flags, &uoss->ss_flags);
2587         }
2588
2589 out:
2590         return error;
2591 }
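
/*
 * Illustrative userspace sketch (not part of this file): registering an
 * alternate stack with sigaltstack(2) and running a handler on it via
 * SA_ONSTACK, the path served by do_sigaltstack() above.  Guarded with
 * #if 0; only standard POSIX calls are used.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
        static const char msg[] = "handled on the alternate stack\n";

        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(1);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;                /* neither SS_DISABLE nor SS_ONSTACK */
        sigaltstack(&ss, NULL);

        sa.sa_handler = on_segv;
        sa.sa_flags = SA_ONSTACK;       /* run the handler on the new stack */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        raise(SIGSEGV);                 /* handler runs on the altstack */
        return 0;
}
#endif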
2592
2593 #ifdef __ARCH_WANT_SYS_SIGPENDING
2594
2595 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2596 {
2597         return do_sigpending(set, sizeof(*set));
2598 }
2599
2600 #endif
2601
2602 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2603 /* Some platforms have their own version with special arguments;
2604    others support only sys_rt_sigprocmask.  */
2605
2606 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2607                 old_sigset_t __user *, oset)
2608 {
2609         int error;
2610         old_sigset_t old_set, new_set;
2611
2612         if (set) {
2613                 error = -EFAULT;
2614                 if (copy_from_user(&new_set, set, sizeof(*set)))
2615                         goto out;
2616                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2617
2618                 spin_lock_irq(&current->sighand->siglock);
2619                 old_set = current->blocked.sig[0];
2620
2621                 error = 0;
2622                 switch (how) {
2623                 default:
2624                         error = -EINVAL;
2625                         break;
2626                 case SIG_BLOCK:
2627                         sigaddsetmask(&current->blocked, new_set);
2628                         break;
2629                 case SIG_UNBLOCK:
2630                         sigdelsetmask(&current->blocked, new_set);
2631                         break;
2632                 case SIG_SETMASK:
2633                         current->blocked.sig[0] = new_set;
2634                         break;
2635                 }
2636
2637                 recalc_sigpending();
2638                 spin_unlock_irq(&current->sighand->siglock);
2639                 if (error)
2640                         goto out;
2641                 if (oset)
2642                         goto set_old;
2643         } else if (oset) {
2644                 old_set = current->blocked.sig[0];
2645         set_old:
2646                 error = -EFAULT;
2647                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2648                         goto out;
2649         }
2650         error = 0;
2651 out:
2652         return error;
2653 }
2654 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2655
2656 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2657 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2658                 const struct sigaction __user *, act,
2659                 struct sigaction __user *, oact,
2660                 size_t, sigsetsize)
2661 {
2662         struct k_sigaction new_sa, old_sa;
2663         int ret = -EINVAL;
2664
2665         /* XXX: Don't preclude handling different sized sigset_t's.  */
2666         if (sigsetsize != sizeof(sigset_t))
2667                 goto out;
2668
2669         if (act) {
2670                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2671                         return -EFAULT;
2672         }
2673
2674         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2675
2676         if (!ret && oact) {
2677                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2678                         return -EFAULT;
2679         }
2680 out:
2681         return ret;
2682 }
2683 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2684
2685 #ifdef __ARCH_WANT_SYS_SGETMASK
2686
2687 /*
2688  * For backwards compatibility.  Functionality superseded by sigprocmask.
2689  */
2690 SYSCALL_DEFINE0(sgetmask)
2691 {
2692         /* SMP safe */
2693         return current->blocked.sig[0];
2694 }
2695
2696 SYSCALL_DEFINE1(ssetmask, int, newmask)
2697 {
2698         int old;
2699
2700         spin_lock_irq(&current->sighand->siglock);
2701         old = current->blocked.sig[0];
2702
2703         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2704                                                   sigmask(SIGSTOP)));
2705         recalc_sigpending();
2706         spin_unlock_irq(&current->sighand->siglock);
2707
2708         return old;
2709 }
2710 #endif /* __ARCH_WANT_SYS_SGETMASK */
2711
2712 #ifdef __ARCH_WANT_SYS_SIGNAL
2713 /*
2714  * For backwards compatibility.  Functionality superseded by sigaction.
2715  */
2716 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2717 {
2718         struct k_sigaction new_sa, old_sa;
2719         int ret;
2720
2721         new_sa.sa.sa_handler = handler;
2722         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2723         sigemptyset(&new_sa.sa.sa_mask);
2724
2725         ret = do_sigaction(sig, &new_sa, &old_sa);
2726
2727         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2728 }
2729 #endif /* __ARCH_WANT_SYS_SIGNAL */
2730
2731 #ifdef __ARCH_WANT_SYS_PAUSE
2732
2733 SYSCALL_DEFINE0(pause)
2734 {
2735         current->state = TASK_INTERRUPTIBLE;
2736         schedule();
2737         return -ERESTARTNOHAND;
2738 }
2739
2740 #endif
2741
2742 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2743 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2744 {
2745         sigset_t newset;
2746
2747         /* XXX: Don't preclude handling different sized sigset_t's.  */
2748         if (sigsetsize != sizeof(sigset_t))
2749                 return -EINVAL;
2750
2751         if (copy_from_user(&newset, unewset, sizeof(newset)))
2752                 return -EFAULT;
2753         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2754
2755         spin_lock_irq(&current->sighand->siglock);
2756         current->saved_sigmask = current->blocked;
2757         current->blocked = newset;
2758         recalc_sigpending();
2759         spin_unlock_irq(&current->sighand->siglock);
2760
2761         current->state = TASK_INTERRUPTIBLE;
2762         schedule();
2763         set_restore_sigmask();
2764         return -ERESTARTNOHAND;
2765 }
2766 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
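
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait that rt_sigsuspend exists for.  The mask swap and the
 * sleep are atomic inside sigsuspend(), so a signal arriving between
 * the check and the wait cannot be lost.  Guarded with #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got;

static void on_alrm(int sig)
{
        got = 1;
}

int main(void)
{
        sigset_t block, old;

        signal(SIGALRM, on_alrm);
        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        sigprocmask(SIG_BLOCK, &block, &old);   /* close the race window */

        alarm(1);                               /* SIGALRM in one second */
        while (!got)
                sigsuspend(&old);               /* atomically unblock + sleep */

        printf("woke up after the handler ran\n");
        return 0;
}
#endif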
2767
2768 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2769 {
2770         return NULL;
2771 }
2772
2773 void __init signals_init(void)
2774 {
2775         sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2776 }
2777
2778 #ifdef CONFIG_KGDB_KDB
2779 #include <linux/kdb.h>
2780 /*
2781  * kdb_send_sig_info - Allows kdb to send signals without exposing
2782  * signal internals.  This function checks if the required locks are
2783  * available before calling the main signal code, to avoid kdb
2784  * deadlocks.
2785  */
2786 void
2787 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2788 {
2789         static struct task_struct *kdb_prev_t;
2790         int sig, new_t;
2791         if (!spin_trylock(&t->sighand->siglock)) {
2792                 kdb_printf("Can't do kill command now.\n"
2793                            "The sigmask lock is held somewhere else in "
2794                            "the kernel; try again later\n");
2795                 return;
2796         }
2797         spin_unlock(&t->sighand->siglock);
2798         new_t = kdb_prev_t != t;
2799         kdb_prev_t = t;
2800         if (t->state != TASK_RUNNING && new_t) {
2801                 kdb_printf("Process is not RUNNING, sending a signal from "
2802                            "kdb risks deadlock\n"
2803                            "on the run queue locks. "
2804                            "The signal has _not_ been sent.\n"
2805                            "Reissue the kill command if you want to risk "
2806                            "the deadlock.\n");
2807                 return;
2808         }
2809         sig = info->si_signo;
2810         if (send_sig_info(sig, info, t))
2811                 kdb_printf("Failed to deliver signal %d to process %d.\n",
2812                            sig, t->pid);
2813         else
2814                 kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
2815 }
2816 #endif  /* CONFIG_KGDB_KDB */