/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <linux/mca.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#define NMI_MAX_NAMELEN	16

struct nmiaction {
	struct list_head list;
	nmi_handler_t handler;
	unsigned long flags;
	const char *name;
};

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] = {
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;

	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
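/*
 * Booting with "unknown_nmi_panic" on the kernel command line sets the
 * flag above, so an NMI that no handler claims panics the box instead of
 * just being logged.
 */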
#define nmi_to_desc(type) (&nmi_desc[type])
static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list)
		handled += a->handler(type, regs);

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  Just a sanity check
	 * to manage expectations
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
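/*
 * Illustrative sketch (not part of the original file): a handler that must
 * run before everyone else, e.g. a crash/shootdown path, passes
 * NMI_FLAG_FIRST when registering so __setup_nmi() puts it at the head of
 * the list.  "my_crash_handler" and "crash" are made-up names.
 *
 *	register_nmi_handler(NMI_LOCAL, my_crash_handler, NMI_FLAG_FIRST, "crash");
 */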
static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			found = n;
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
	return found;
}
int register_nmi_handler(unsigned int type, nmi_handler_t handler,
			unsigned long nmiflags, const char *devname)
{
	struct nmiaction *action;
	int retval = -ENOMEM;

	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
	if (!action)
		goto fail_action;

	action->handler = handler;
	action->flags = nmiflags;
	action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
	if (!action->name)
		goto fail_action_name;

	retval = __setup_nmi(type, action);
	if (retval)
		goto fail_setup_nmi;

	return retval;

fail_setup_nmi:
	kfree(action->name);
fail_action_name:
	kfree(action);
fail_action:
	return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmiaction *a;

	a = __free_nmi(type, name);
	if (a) {
		kfree(a->name);
		kfree(a);
	}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
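/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * user of this API would hook an NMI handler.  The names
 * "example_nmi_handler", "my_device_raised_nmi" and "example_dev" are
 * made up for the example.
 *
 *	static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return 0;	(not ours, let other handlers look)
 *		return 1;		(claimed; summed into 'handled' by nmi_handle())
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example_dev");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "example_dev");
 *
 * The devname string doubles as the lookup key that
 * unregister_nmi_handler()/__free_nmi() use to find and remove the action.
 */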
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when a NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}
void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}