2 * linux/arch/x86_64/nmi.c
4 * NMI watchdog support on APIC systems
6 * Started by Ingo Molnar <mingo@redhat.com>
9 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
10 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
12 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
15 #include <linux/config.h>
17 #include <linux/irq.h>
18 #include <linux/delay.h>
19 #include <linux/bootmem.h>
20 #include <linux/smp_lock.h>
21 #include <linux/interrupt.h>
22 #include <linux/mc146818rtc.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/sysdev.h>
26 #include <linux/nmi.h>
27 #include <linux/sysctl.h>
31 #include <asm/mpspec.h>
34 #include <asm/proto.h>
35 #include <asm/kdebug.h>
38 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
39 * - it may be reserved by some other driver, or not
40 * - when not reserved by some other driver, it may be used for
41 * the NMI watchdog, or not
43 * This is maintained separately from nmi_active because the NMI
44 * watchdog may also be driven from the I/O APIC timer.
46 static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
/* Bitmask of the LAPIC_NMI_* flags below; guarded by lapic_nmi_owner_lock. */
47 static unsigned int lapic_nmi_owner;
48 #define LAPIC_NMI_WATCHDOG (1<<0)
49 #define LAPIC_NMI_RESERVED (1<<1)
/* Tri-state watchdog status (see the value legend below). */
52 * +1: the lapic NMI watchdog is active, but can be disabled
53 * 0: the lapic NMI watchdog has not been set up, and cannot
55 * -1: the lapic NMI watchdog is disabled, but can be enabled
57 int nmi_active; /* oprofile uses this */
/* Watchdog mode selected (NMI_DEFAULT / NMI_LOCAL_APIC / NMI_IO_APIC / ...);
   resolved from NMI_DEFAULT in nmi_watchdog_default(). */
60 unsigned int nmi_watchdog = NMI_DEFAULT;
/* NMI tick rate; starts at HZ and is presumably reduced once the watchdog
   is verified (see the comment in check_nmi_watchdog()) -- TODO confirm,
   the assignment line is not visible in this extract. */
61 static unsigned int nmi_hz = HZ;
62 unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
64 /* Note that these events don't tick when the CPU idles. This means
65 the frequency varies with CPU load. */
/* AMD K7/K8 EVNTSEL bit positions and the event counted by the watchdog
   (event 0x76: processor cycles while not halted). */
67 #define K7_EVNTSEL_ENABLE (1 << 22)
68 #define K7_EVNTSEL_INT (1 << 20)
69 #define K7_EVNTSEL_OS (1 << 17)
70 #define K7_EVNTSEL_USR (1 << 16)
71 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
72 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
/* Intel P6-family equivalents (event 0x79: CPU clocks not halted). */
74 #define P6_EVNTSEL0_ENABLE (1 << 22)
75 #define P6_EVNTSEL_INT (1 << 20)
76 #define P6_EVNTSEL_OS (1 << 17)
77 #define P6_EVNTSEL_USR (1 << 16)
78 #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
79 #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED
81 /* Run after command line and cpu_init init, but before all other checks */
/*
 * Resolve NMI_DEFAULT to a concrete watchdog mode: local-APIC perfctr0
 * on AMD (the IO-APIC watchdog is broken on the AMD 8111 chipset, see
 * below), IO-APIC mode otherwise. A user-specified mode is left alone.
 * NOTE(review): this extract is missing lines (braces/early return);
 * code tokens left untouched.
 */
82 void __init nmi_watchdog_default(void)
84 if (nmi_watchdog != NMI_DEFAULT)
87 /* For some reason the IO APIC watchdog doesn't work on the AMD
88 8111 chipset. For now switch to local APIC mode using
89 perfctr0 there. On Intel CPUs we don't have code to handle
90 the perfctr and the IO-APIC seems to work, so use that. */
92 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
93 nmi_watchdog = NMI_LOCAL_APIC;
95 "Using local APIC NMI watchdog using perfctr0\n");
97 printk(KERN_INFO "Using IO APIC NMI watchdog\n");
98 nmi_watchdog = NMI_IO_APIC;
102 /* Why is there no CPUID flag for this? */
/*
 * Vendor/family check for a usable local APIC: Intel family >= 6
 * (P6 and later). Other vendors presumably fall through to a default
 * return that is not visible in this extract -- TODO confirm.
 */
103 static __init int cpu_has_lapic(void)
105 switch (boot_cpu_data.x86_vendor) {
106 case X86_VENDOR_INTEL:
108 return boot_cpu_data.x86 >= 6;
109 /* .... add more cpus here or find a different way to figure this out. */
/*
 * Boot-time sanity check: snapshot every CPU's NMI count, wait ~10
 * watchdog ticks, and verify each booted CPU's count advanced by more
 * than 5. A CPU whose count did not move has a stuck watchdog, so the
 * LAPIC_NMI_WATCHDOG ownership bit is dropped. Falls back to NMI_NONE
 * up front if local-APIC mode was chosen on a CPU without a usable APIC.
 */
115 int __init check_nmi_watchdog (void)
120 if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) {
121 nmi_watchdog = NMI_NONE;
125 printk(KERN_INFO "testing NMI watchdog ... ");
/* Snapshot the per-cpu NMI counters before the delay. */
127 for (cpu = 0; cpu < NR_CPUS; cpu++)
128 counts[cpu] = cpu_pda[cpu].__nmi_count;
130 mdelay((10*1000)/nmi_hz); // wait 10 ticks
132 for (cpu = 0; cpu < NR_CPUS; cpu++) {
134 /* Check cpu_callin_map here because that is set
135 after the timer is started. */
136 if (!cpu_isset(cpu, cpu_callin_map))
139 if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
140 printk("CPU#%d: NMI appears to be stuck (%d)!\n",
142 cpu_pda[cpu].__nmi_count);
/* Watchdog is unusable: give up ownership of the lapic NMI. */
144 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
150 /* now that we know it works we can reduce NMI frequency to
151 something more reasonable; makes a difference in some configs */
152 if (nmi_watchdog == NMI_LOCAL_APIC)
/*
 * Parse the "nmi_watchdog=" boot option. Accepts an optional leading
 * "panic," prefix (panic instead of just warning on timeout) followed
 * by a numeric mode, rejected if >= NMI_INVALID.
 */
158 int __init setup_nmi_watchdog(char *str)
162 if (!strncmp(str,"panic",5)) {
163 panic_on_timeout = 1;
/* Skip past "panic" to the comma-separated numeric mode. */
164 str = strchr(str, ',');
170 get_option(&str, &nmi);
172 if (nmi >= NMI_INVALID)
178 __setup("nmi_watchdog=", setup_nmi_watchdog);
/*
 * Stop the local-APIC perfctr watchdog by clearing the vendor-specific
 * event-select MSR, then mark the watchdog inactive for do_nmi() et al.
 */
180 static void disable_lapic_nmi_watchdog(void)
184 switch (boot_cpu_data.x86_vendor) {
186 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
188 case X86_VENDOR_INTEL:
189 wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
193 /* tell do_nmi() and others that we're not active any more */
/*
 * Re-arm the local-APIC watchdog, but only from the "disabled, can be
 * enabled" state (nmi_active < 0); reprograms the hardware via
 * setup_apic_nmi_watchdog().
 */
197 static void enable_lapic_nmi_watchdog(void)
199 if (nmi_active < 0) {
200 nmi_watchdog = NMI_LOCAL_APIC;
201 setup_apic_nmi_watchdog();
/*
 * Claim the lapic NMI hardware for an external driver (e.g. oprofile).
 * Sets LAPIC_NMI_RESERVED under the owner lock; fails if another driver
 * already holds the reservation, and tears down the watchdog if the
 * watchdog was the previous owner. (Return statements are missing from
 * this extract; by the callers' usage, < 0 signals failure.)
 */
205 int reserve_lapic_nmi(void)
207 unsigned int old_owner;
209 spin_lock(&lapic_nmi_owner_lock);
210 old_owner = lapic_nmi_owner;
211 lapic_nmi_owner |= LAPIC_NMI_RESERVED;
212 spin_unlock(&lapic_nmi_owner_lock);
/* Someone else already reserved it -> fail. */
213 if (old_owner & LAPIC_NMI_RESERVED)
/* The watchdog owned the hardware: stop it before handing over. */
215 if (old_owner & LAPIC_NMI_WATCHDOG)
216 disable_lapic_nmi_watchdog();
/*
 * Drop a reservation taken by reserve_lapic_nmi(). If the watchdog
 * still has its ownership bit set, restart it now that the hardware
 * is free again.
 */
220 void release_lapic_nmi(void)
222 unsigned int new_owner;
224 spin_lock(&lapic_nmi_owner_lock);
225 new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
226 lapic_nmi_owner = new_owner;
227 spin_unlock(&lapic_nmi_owner_lock);
228 if (new_owner & LAPIC_NMI_WATCHDOG)
229 enable_lapic_nmi_watchdog();
/*
 * Disable the IO-APIC-timer-driven watchdog: only acts when that mode
 * is active; removes the NMI callback and marks the watchdog off.
 */
232 void disable_timer_nmi_watchdog(void)
234 if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
238 unset_nmi_callback();
240 nmi_watchdog = NMI_NONE;
/*
 * Re-enable the IO-APIC timer watchdog from the disabled state
 * (nmi_active < 0); touches the watchdog so no stale alert counts
 * fire immediately.
 */
243 void enable_timer_nmi_watchdog(void)
245 if (nmi_active < 0) {
246 nmi_watchdog = NMI_IO_APIC;
247 touch_nmi_watchdog();
/* Power management: remember whether the watchdog was running across a
   suspend/resume cycle and restore it accordingly. */
255 static int nmi_pm_active; /* nmi_active before suspend */
/* Suspend hook: save state and quiesce the lapic watchdog. */
257 static int lapic_nmi_suspend(struct sys_device *dev, u32 state)
259 nmi_pm_active = nmi_active;
260 disable_lapic_nmi_watchdog();
/* Resume hook: restart the watchdog only if it was active before. */
264 static int lapic_nmi_resume(struct sys_device *dev)
266 if (nmi_pm_active > 0)
267 enable_lapic_nmi_watchdog();
/* sysdev class exposing the lapic NMI watchdog to the driver-model PM
   core, wiring in the suspend/resume hooks above. */
271 static struct sysdev_class nmi_sysclass = {
272 set_kset_name("lapic_nmi"),
273 .resume = lapic_nmi_resume,
274 .suspend = lapic_nmi_suspend,
/* The single device instance registered under nmi_sysclass. */
277 static struct sys_device device_lapic_nmi = {
279 .cls = &nmi_sysclass,
/*
 * Register the lapic-NMI sysdev class and device so PM callbacks fire.
 * Skipped unless the local-APIC watchdog actually came up.
 */
282 static int __init init_lapic_nmi_sysfs(void)
286 if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
289 error = sysdev_class_register(&nmi_sysclass);
/* Only register the device if the class registered cleanly. */
291 error = sysdev_register(&device_lapic_nmi);
294 /* must come after the local APIC's device_initcall() */
295 late_initcall(init_lapic_nmi_sysfs);
297 #endif /* CONFIG_PM */
300 * Activate the NMI watchdog via the local APIC.
301 * Original code written by Keith Owens.
/*
 * Program AMD K7/K8 perfctr 0 as the watchdog: count "cycles processor
 * is running" with interrupt delivery enabled, route the perf-counter
 * LVT entry to NMI, and preload the counter with a negative value so it
 * overflows (raising an NMI) roughly nmi_hz times per second.
 * NOTE(review): the OR-chain building evntsel is partly missing from
 * this extract (only K7_EVNTSEL_INT is visible).
 */
304 static void setup_k7_watchdog(void)
307 unsigned int evntsel;
309 /* No check, so can start with slow frequency */
312 /* XXX should check these in EFER */
/* Record which MSR nmi_watchdog_tick() must reload on each NMI. */
314 nmi_perfctr_msr = MSR_K7_PERFCTR0;
/* Clear all four event-select/counter pairs before programming. */
316 for(i = 0; i < 4; ++i) {
317 /* Simulator may not support it */
318 if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
320 wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
323 evntsel = K7_EVNTSEL_INT
/* Write the event selection first, counter second, then enable. */
328 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
/* Negative preload: overflow after cpu_khz*1000/nmi_hz cycles. */
329 wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
/* Deliver performance-counter overflow as NMI. */
330 apic_write(APIC_LVTPC, APIC_DM_NMI);
331 evntsel |= K7_EVNTSEL_ENABLE;
332 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
/*
 * Vendor dispatch for starting the local-APIC watchdog; on success the
 * watchdog takes ownership of the lapic NMI hardware. The "Screwdriver"
 * model-id check presumably blacklists a simulator/emulator CPU string
 * -- TODO confirm (the bail-out lines are missing from this extract).
 */
335 void setup_apic_nmi_watchdog(void)
337 switch (boot_cpu_data.x86_vendor) {
/* Only family >= 6 AMD parts are supported here. */
339 if (boot_cpu_data.x86 < 6)
341 if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
348 lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
353 * the best way to detect whether a CPU has a 'hard lockup' problem
354 * is to check its local APIC timer IRQ counts. If they are not
355 * changing then that CPU has some problem.
357 * as these watchdog NMI IRQs are generated on every CPU, we only
358 * have to check the current processor.
360 * since NMIs don't listen to _any_ locks, we have to be extremely
361 * careful not to rely on unsafe variables. The printk might lock
362 * up though, so we have to break up any console locks first ...
363 * [when there will be more tty-related locks, break them up
/* Per-cpu state for the lockup detector: last observed timer-IRQ count
   and how many consecutive unchanged ticks have been seen. */
368 last_irq_sums [NR_CPUS],
369 alert_counter [NR_CPUS];
/*
 * Reset every CPU's stuck-counter. Called by code that legitimately
 * stalls other CPUs (e.g. while holding locks they spin on), so the
 * watchdog does not fire during the stall.
 */
371 void touch_nmi_watchdog (void)
376 * Just reset the alert counters, (other CPUs might be
377 * spinning on locks we hold):
379 for (i = 0; i < NR_CPUS; i++)
380 alert_counter[i] = 0;
/*
 * Per-NMI watchdog tick: if this CPU's apic_timer_irqs count has not
 * moved since the last NMI, bump its alert counter; after 5 seconds'
 * worth of unchanged ticks, give DIE_NMI notifiers a chance to claim
 * the event, otherwise declare a lockup via die_nmi(). Finally reload
 * the perfctr so the next overflow NMI arrives ~1/nmi_hz from now.
 */
383 void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
387 cpu = safe_smp_processor_id();
388 sum = read_pda(apic_timer_irqs);
389 if (last_irq_sums[cpu] == sum) {
391 * Ayiee, looks like this CPU is stuck ...
392 * wait a few IRQs (5 seconds) before doing the oops ...
394 alert_counter[cpu]++;
395 if (alert_counter[cpu] == 5*nmi_hz) {
/* A notifier (e.g. kernel debugger) handled it: reset and carry on. */
396 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
398 alert_counter[cpu] = 0;
401 die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
/* Timer IRQs progressed: CPU is alive, reset the stuck-counter. */
404 last_irq_sums[cpu] = sum;
405 alert_counter[cpu] = 0;
/* Re-arm the perf counter with a negative preload (high half = -1). */
408 wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
/* Default NMI callback: claims nothing, so do_nmi() falls through to
   default_do_nmi(). (Body not visible in this extract.) */
411 static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
/* Currently installed NMI callback; replaced via set_nmi_callback(). */
416 static nmi_callback_t nmi_callback = dummy_nmi_callback;
/*
 * Top-level NMI entry from the low-level handler: bump this CPU's NMI
 * count, then offer the event to the installed callback (oprofile,
 * unknown-nmi-panic, ...); if the callback does not claim it, run the
 * default NMI processing.
 */
418 asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
420 int cpu = safe_smp_processor_id();
423 add_pda(__nmi_count,1);
424 if (!nmi_callback(regs, cpu))
425 default_do_nmi(regs);
/* Install an NMI callback that gets first claim on every NMI. */
429 void set_nmi_callback(nmi_callback_t callback)
431 nmi_callback = callback;
/* Restore the pass-through dummy callback. */
434 void unset_nmi_callback(void)
436 nmi_callback = dummy_nmi_callback;
/*
 * Callback installed by proc_unknown_nmi_panic(): if the NMI reason
 * port shows neither memory- nor I/O-parity bits (mask 0xc0), the NMI
 * is "unknown" and a panic message is formatted (the die_nmi/panic
 * call is not visible in this extract -- TODO confirm).
 */
441 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
443 unsigned char reason = get_nmi_reason();
446 if (!(reason & 0xc0)) {
447 sprintf(buf, "NMI received for unknown reason %02x\n", reason);
454 * proc handler for /proc/sys/kernel/unknown_nmi_panic
/*
 * Sysctl handler: on a 0->1 transition, reserve the lapic NMI hardware
 * and install the panic-on-unknown-NMI callback (rolling the flag back
 * if the reservation fails); on 1->0, remove the callback. No-op when
 * the written value does not change the flag's truth value.
 */
456 int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
457 void __user *buffer, size_t *length, loff_t *ppos)
461 old_state = unknown_nmi_panic;
462 proc_dointvec(table, write, file, buffer, length, ppos);
/* Value unchanged (as a boolean): nothing to (un)install. */
463 if (!!old_state == !!unknown_nmi_panic)
466 if (unknown_nmi_panic) {
467 if (reserve_lapic_nmi() < 0) {
/* Could not get the hardware: revert the sysctl value. */
468 unknown_nmi_panic = 0;
471 set_nmi_callback(unknown_nmi_panic_callback);
475 unset_nmi_callback();
/* Symbols exported for modules (oprofile and other perfctr users). */
482 EXPORT_SYMBOL(nmi_active);
483 EXPORT_SYMBOL(nmi_watchdog);
484 EXPORT_SYMBOL(reserve_lapic_nmi);
485 EXPORT_SYMBOL(release_lapic_nmi);
486 EXPORT_SYMBOL(disable_timer_nmi_watchdog);
487 EXPORT_SYMBOL(enable_timer_nmi_watchdog);
488 EXPORT_SYMBOL(touch_nmi_watchdog);