/*
 *  linux/arch/i386/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/kdebug.h>

#include "mach_traps.h"
/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters / event selection registers may be
 * reserved by different subsystems; this reservation system just tries
 * to coordinate things a little.
 */
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
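/* A worked check of that bound (MSR addresses as defined in <asm/msr.h>;
 * treat the exact values as an assumption of this note): MSR_P4_BSU_ESCR0
 * is 0x3a0 and MSR_P4_CRU_ESCR5 is 0x3e1, so the largest offset
 * nmi_evntsel_msr_to_bit() can produce is 0x3e1 - 0x3a0 = 65, i.e. 66
 * distinct bit positions.  That is also why evntsel_nmi_owner above is an
 * array of 3 unsigned longs: 3 * 32 = 96 bits comfortably covers 66 on i386.
 */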
/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* local prototypes */
static void stop_apic_nmi_watchdog(void *unused);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

extern void show_registers(struct pt_regs *regs);
extern int unknown_nmi_panic;
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_PERFCTR0);
		case 15:
			return (msr - MSR_P4_BPU_PERFCTR0);
		}
	}
	return 0;
}
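/* Example of the mapping (assuming the MSR layout in <asm/msr.h>, where
 * each family's counter MSRs are contiguous): on AMD K7,
 * MSR_K7_PERFCTR2 == MSR_K7_PERFCTR0 + 2 and so reserves bit 2; on a P4
 * (family 15), MSR_P4_BPU_PERFCTR0 reserves bit 0.  The evntsel variant
 * below does the same for the event-select/ESCR address ranges.
 */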
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_EVNTSEL0);
		case 15:
			return (msr - MSR_P4_BSU_ESCR0);
		}
	}
	return 0;
}
/* checks for bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
		return 1;
	return 0;
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
}
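/* A minimal usage sketch (hypothetical caller, not code in this file):
 * a profiling subsystem such as oprofile claims a counter/event-select
 * pair before programming it, and releases both on teardown:
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program the MSRs ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 *
 * Note that the reservation bitmaps are per-CPU, so ownership is
 * coordinated per processor.
 */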
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
	case X86_VENDOR_INTEL:
		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
	}
	return 0;
}
#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
#endif
static int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	unsigned int *prev_nmi_count;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		return -1;

	printk(KERN_INFO "Testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		/* Check cpu_callin_map here because that is set
		   after the timer is started. */
		if (!cpu_isset(cpu, cpu_callin_map))
			continue;
#endif
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				prev_nmi_count[cpu],
				nmi_count(cpu));
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
}
/* This needs to happen later in boot so counters are working */
late_initcall(check_nmi_watchdog);
static int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;
	/*
	 * If any other x86 CPU has a local APIC, then
	 * please test the NMI stuff there and send me the
	 * missing bits. Right now Intel P6/P4 and AMD K7 only.
	 */
	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;  /* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
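/* Usage sketch: the mode is picked on the kernel command line, e.g.
 *	nmi_watchdog=1	(NMI_IO_APIC: driven from the I/O APIC timer)
 *	nmi_watchdog=2	(NMI_LOCAL_APIC: driven from the perfctr overflow NMI)
 * assuming the usual NMI_* values from <asm/nmi.h>; anything outside the
 * [NMI_NONE, NMI_INVALID) range is rejected above.
 */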
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = atomic_read(&nmi_active);
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);
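/* The device registered above shows up (assuming the usual sysdev naming)
 * as /sys/devices/system/lapic_nmi/lapic_nmi0; it exists purely to give
 * the driver-model PM core a hook to call lapic_nmi_suspend() and
 * lapic_nmi_resume() around a suspend/resume cycle.
 */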
#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}
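/* Worked example (illustrative numbers only): on a 2 GHz CPU,
 * cpu_khz == 2000000, so count == 2000000 * 1000 / nmi_hz.  With the
 * initial nmi_hz == HZ (say 1000), that is 2000000 cycles: the counter
 * is loaded with -2000000 and overflows, raising the NMI, roughly 1 ms
 * later.  Once check_nmi_watchdog() lowers nmi_hz to 1, the reload value
 * grows to 2e9 cycles, i.e. about one NMI per second.
 */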
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<63;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
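/* Why check_bit is bit 63 here: write_watchdog_counter() loads the
 * counter with a negative value, so until it counts up through zero the
 * value read back in nmi_watchdog_tick() still has its top bit set (that
 * the K7 readout mirrors the counter's sign in bit 63 is an assumption of
 * this note; P6/P4 below test bit 39 of their 40-bit counters instead).
 * A set check_bit therefore means "no overflow yet, this NMI wasn't ours".
 */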
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_P6_PERFCTR0;
	evntsel_msr = MSR_P6_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "P6_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_p6_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
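/* Unpacking that recipe: with P4_CCCR_COMPARE and P4_CCCR_COMPLEMENT set
 * and P4_CCCR_THRESHOLD(15), the CCCR counts cycles in which the ESCR's
 * event count is less than or equal to the threshold, which holds on
 * every cycle, so IQ_COUNTER0 increments once per clock. That turns an
 * arbitrary event into the steady heartbeat the watchdog needs.
 */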
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					return;

				if (!setup_p6_watchdog())
					return;
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					return;

				if (!setup_p4_watchdog())
					return;
				break;
			default:
				return;
			}
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
	atomic_inc(&nmi_active);
}
static void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					break;
				stop_p6_watchdog();
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					break;
				stop_p4_watchdog();
				break;
			}
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
	atomic_dec(&nmi_active);
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up
 *  here too!]
 */

static unsigned int
	last_irq_sums [NR_CPUS],
	alert_counter [NR_CPUS];
void touch_nmi_watchdog(void)
{
	int i;

	/*
	 * Just reset the alert counters, (other CPUs might be
	 * spinning on locks we hold):
	 */
	for_each_possible_cpu(i)
		alert_counter[i] = 0;

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
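/* A minimal usage sketch (hypothetical caller): code that legitimately
 * keeps a CPU busy for longer than the ~5 s watchdog window should
 * tickle the watchdog as it polls, e.g.:
 *
 *	while (!device_ready()) {	... device_ready() is illustrative ...
 *		touch_nmi_watchdog();
 *		mdelay(1);
 *	}
 */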
extern void die_nmi(struct pt_regs *, const char *msg);

void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		touched = 1;
	}

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel P4 uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			} else if (wd->perfctr_msr == MSR_P6_PERFCTR0) {
				/* Only P6-based Pentium M needs to re-unmask
				 * the apic vector, but it doesn't hurt
				 * other P6 variants */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			write_watchdog_counter(wd->perfctr_msr, NULL);
		}
	}
done:
	return;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(regs, buf);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}

#endif
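/* Usage sketch for the sysctl above: from userspace,
 *	echo 1 > /proc/sys/kernel/unknown_nmi_panic
 * claims the lapic NMI (temporarily disabling the lapic watchdog if it
 * owned it) and installs unknown_nmi_panic_callback(), so an NMI with no
 * known reason code dies via die_nmi() instead of being silently ignored;
 * echo 0 releases the NMI back to the watchdog.
 */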
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);