arch/x86/kernel/apic/hw_nmi.c
/*
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>	/* mdelay() */

#ifdef CONFIG_HARDLOCKUP_DETECTOR
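/*
 * Sample period, in CPU cycles, for the perf event that drives the
 * hardlockup detector: cpu_khz * 1000 is roughly cycles per second at the
 * boot-measured clock rate, so this arms the counter for about 60 seconds'
 * worth of cycles.
 */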
u64 hw_nmi_get_sample_period(void)
{
        return (u64)(cpu_khz) * 1000 * 60;
}
#endif

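/*
 * Everything below implements arch_trigger_all_cpu_backtrace(): an NMI is
 * sent to every online CPU, and each CPU dumps its registers and stack from
 * the NMI die-notifier handler.  This block is built only when the arch
 * header defines the arch_trigger_all_cpu_backtrace hook.
 */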
#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

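/*
 * Ask every online CPU to dump its stack.  All online CPUs are marked in
 * backtrace_mask, an NMI is sent to all of them, and the caller then polls
 * for up to 10 seconds while each CPU clears its own bit from the NMI
 * handler once its backtrace has been printed.
 */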
void arch_trigger_all_cpu_backtrace(void)
{
        int i;

        if (test_and_set_bit(0, &backtrace_flag))
                /*
                 * If there is already a trigger_all_cpu_backtrace() in
                 * progress (backtrace_flag == 1), don't output a second,
                 * duplicate set of CPU dumps.
                 */
                return;

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }

        clear_bit(0, &backtrace_flag);
        smp_mb__after_clear_bit();
}

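/*
 * NMI die-notifier callback, run in NMI context on the CPU that took the
 * NMI.  If this CPU is still marked in backtrace_mask, print its registers
 * and stack under an arch spinlock (so the output of different CPUs does
 * not interleave), clear the bit, and return NOTIFY_STOP so the NMI is
 * treated as handled.
 */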
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
                                       unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;
        int cpu;

        switch (cmd) {
        case DIE_NMI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;
        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

                arch_spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
                dump_stack();
                arch_spin_unlock(&lock);
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NOTIFY_STOP;
        }

        return NOTIFY_DONE;
}

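/*
 * The handler hangs off the die notifier chain with NMI_LOCAL_LOW_PRIOR, a
 * low priority, so higher-priority NMI handlers on the chain get to look at
 * the NMI before this one does.
 */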
static __read_mostly struct notifier_block backtrace_notifier = {
        .notifier_call          = arch_trigger_all_cpu_backtrace_handler,
        .next                   = NULL,
        .priority               = NMI_LOCAL_LOW_PRIOR,
};

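/*
 * Register the backtrace handler on the die notifier chain at early-initcall
 * time, so NMI-triggered backtraces are available early in boot.
 */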
static int __init register_trigger_all_cpu_backtrace(void)
{
        register_die_notifier(&backtrace_notifier);
        return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif