/*
 * linux/arch/x86_64/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/event selectors may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 *
 * The bitmasks are unsigned long because the bitops operate on longs and
 * the evntsel bit offsets can exceed 31 (see NMI_MAX_COUNTER_BITS below).
 */
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[2]);

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
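
/* A worked example of where 66 comes from (assuming the usual P4 MSR layout,
 * with MSR_P4_BSU_ESCR0 == 0x3a0 and MSR_P4_CRU_ESCR5 == 0x3e1):
 *   0x3e1 - 0x3a0 = 0x41 = 65
 * so the highest reservation bit index is 65, and 66 bits are enough to
 * cover every event-selection register on all supported platforms.
 */
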
/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
int unknown_nmi_panic;
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static void stop_apic_nmi_watchdog(void *unused);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}
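
/* For example (assuming the conventional MSR numbering, with
 * MSR_K7_PERFCTR0 == 0xc0010004 and MSR_K7_PERFCTR1 == 0xc0010005): on AMD,
 * nmi_perfctr_msr_to_bit(MSR_K7_PERFCTR1) returns 1, so each successive
 * counter MSR claims the next bit in the per-cpu owner bitmask.
 */
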
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}
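
/* test_and_set_bit() atomically sets the bit and returns its previous value,
 * so reserve_perfctr_nmi() returns 1 exactly when this caller won the race
 * for the counter; any later reservation attempt sees the bit already set
 * and fails with 0 until release_perfctr_nmi() clears it.
 */
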
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}

static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		return boot_cpu_data.x86 == 15;
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		mb();
}

int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */
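	/* Why this is ~10 ticks: the watchdog fires nmi_hz times per second,
	 * so one period is 1000/nmi_hz ms and (10*1000)/nmi_hz ms spans ten
	 * periods; a healthy CPU accumulates well over the 5 NMIs demanded
	 * below.
	 */
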
	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				counts[cpu],
				cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;	/* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
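
/* Boot-line usage sketch (values assumed from the NMI_* constants in
 * <asm/nmi.h>, where NMI_IO_APIC == 1 and NMI_LOCAL_APIC == 2):
 *   nmi_watchdog=2        use the local APIC perfctr watchdog
 *   nmi_watchdog=panic,1  use the I/O APIC watchdog and panic on a lockup
 */
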
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}
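
/* Reservation protocol, as used by e.g. oprofile: a driver that wants the
 * lapic NMI calls reserve_lapic_nmi(), which kicks the watchdog off the
 * hardware for the duration; release_lapic_nmi() re-enables the watchdog
 * if it owned the lapic before the reservation.
 */
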
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = atomic_read(&nmi_active);
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd->check_bit = 1ULL << 63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
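
/* The reload value above is worth spelling out: cpu_khz * 1000 is the clock
 * rate in Hz, so cpu_khz * 1000 / nmi_hz is the number of cycles in one
 * watchdog period. Writing its negation makes the up-counting perfctr
 * overflow (and raise the NMI) after exactly that many cycles; e.g. a
 * hypothetical 2 GHz CPU with nmi_hz == 1 reloads with -2000000000.
 */
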
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
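
/* Why the complemented threshold works (a sketch of the trick, per the
 * manual section cited above): with COMPARE and COMPLEMENT set and
 * THRESHOLD at its maximum of 15, the comparison "event count <= 15" is
 * true on every cycle, so IQ_COUNTER0 increments once per clock tick no
 * matter how many events CRU_ESCR0 actually reports.
 */
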
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
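
/* check_bit is 1ULL<<39 here because P4 performance counters are 40 bits
 * wide: after the negative reload, bit 39 (the counter's sign bit) stays
 * set until the overflow that raised the NMI, which is how
 * nmi_watchdog_tick() tells a watchdog NMI from an unrelated one. The K7
 * path uses bit 63 instead, on the assumption that rdmsrl sees the K7
 * counter sign-extended to 64 bits.
 */
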
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
	atomic_inc(&nmi_active);
}

static void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
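
/* Usage note: long-running code that legitimately keeps a CPU busy with
 * interrupts off (early printk loops, memory scrubbing and the like) is
 * expected to call touch_nmi_watchdog() periodically so the alert counters
 * below never reach the 5-second lockup threshold.
 */
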
int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	return rcu_dereference(nmi_callback)(regs, cpu);
}

void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
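
/* The callback handoff is RCU-style on purpose: set_nmi_callback() publishes
 * the new pointer with rcu_assign_pointer() and do_nmi_callback() reads it
 * with rcu_dereference(), so an NMI arriving mid-update sees either the old
 * or the new callback, never a torn pointer.
 */
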
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}

#endif	/* CONFIG_SYSCTL */

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);