/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

int mce_disabled __read_mostly;

#ifdef CONFIG_X86_NEW_MCE

#define MISC_MCELOG_MINOR	227

#define SPINUNIT	100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
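/*
 * Note: tolerant can be changed at boot time via the mce= option parsed
 * in mcheck_enable() below, or at runtime through the per-CPU "tolerant"
 * sysfs attribute registered further down in this file.
 */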
static int		banks;
static u64		*bank;
static unsigned long	notify_user;
static int		rip_msr;
static int		mce_bootlog = -1;
static int		monarch_timeout = -1;

static char		trigger[128];
static char		*trigger_argv[2] = { trigger, NULL };

static unsigned long	dont_init_banks;

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int		cpu_missing;

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static inline int skip_bank_init(int i)
{
	return i < BITS_PER_LONG && test_bit(i, &dont_init_banks);
}
/* Do the initial setup of a struct mce record */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also keeps MCEs separate from ordinary kernel messages to avoid bogus
 * bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &notify_user);
}
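/*
 * Illustrative sketch (not compiled): how a machine check source composes
 * and logs a record with the infrastructure above. The field values are
 * made up for the example; real callers fill them from hardware registers.
 */
#if 0
static void example_log_corrected_error(void)
{
	struct mce m;

	mce_setup(&m);			/* fill CPU/time/topology fields */
	m.bank = 0;			/* hypothetical bank number */
	m.status = MCI_STATUS_VAL;	/* valid, corrected (no MCI_STATUS_UC) */
	mce_log(&m);			/* lockless insert into mcelog */
}
#endif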
static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %llx ", m->tsc);
	if (m->addr)
		printk("ADDR %llx ", m->addr);
	if (m->misc)
		printk("MISC %llx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
	       m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}
#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	panic("Panicking machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i;

	/*
	 * Make sure only one CPU runs in the machine check panic path.
	 */
	if (atomic_add_return(1, &mce_paniced) > 1)
		wait_for_panic();

	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if ((m->status & MCI_STATUS_VAL) &&
		    !(m->status & MCI_STATUS_UC))
			print_mce(m);
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce)))
			print_mce(m);
	}
	if (final)
		print_mce(final);
	if (cpu_missing)
		printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
	if (exp)
		printk(KERN_EMERG "Machine check: %s\n", exp);
	panic(msg);
}
/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __get_cpu_var(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MC0_STATUS + bank*4)
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MC0_ADDR + bank*4)
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MC0_MISC + bank*4)
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}
	rdmsrl(msr, v);
	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
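/*
 * Illustrative sketch (not compiled): how an injector such as mce-inject.c
 * can use the per-CPU injectm record. Once injectm.finished is set, the
 * wrappers above read and write the fake record instead of the real MSRs,
 * so the regular handlers can be exercised without a hardware error. The
 * helper name below is hypothetical.
 */
#if 0
static void example_inject_on_this_cpu(struct mce *fake)
{
	__get_cpu_var(injectm) = *fake;		/* stage fake register state */
	__get_cpu_var(injectm).finished = 1;	/* redirect mce_rdmsrl/mce_wrmsrl */
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
	__get_cpu_var(injectm).finished = 0;	/* back to real MSR access */
}
#endif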
int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		m->ip = mce_rdmsrl(rip_msr);
	}
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Called after interrupts have been reenabled, when an MCE happened
 * during an interrupts-off region in the kernel.
 */
asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	mce_notify_user();
	irq_exit();
}
#endif

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_user();
		return;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Without an APIC do not notify. The event will be picked
	 * up eventually.
	 */
	if (!cpu_has_apic)
		return;

	/*
	 * When interrupts are disabled we cannot use
	 * kernel services safely. Trigger a self-interrupt
	 * through the APIC to instead do the notification
	 * after interrupts are reenabled again.
	 */
	apic->send_IPI_self(MCE_SELF_VECTOR);

	/*
	 * Wait for idle afterwards again so that we don't leave the
	 * APIC in a non-idle state because the normal APIC writes
	 * cannot exclude us.
	 */
	apic_wait_icr_idle();
#endif
}
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	__get_cpu_var(mce_poll_count)++;

	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	for (i = 0; i < banks; i++) {
		if (!bank[i] || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected events are handled by the exception handler
		 * when it is enabled. But when the exception is disabled log
		 * everything.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG)) {
			mce_log(&m);
			add_taint(TAINT_MACHINE_CHECK);
		}

		/*
		 * Clear the state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */
}
EXPORT_SYMBOL_GPL(machine_check_poll);
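/*
 * Callers of machine_check_poll() in this file: the periodic mcheck_timer
 * below and the boot-time leftover scan in mce_init(). The Intel CMCI
 * interrupt handler polls its owned banks the same way.
 */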
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines the order of CPUs on entry. The first CPU becomes the Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that the panic status
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, it panics. Only then does it let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure that all CPUs' errors are always examined.
 *
 * This also detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In that case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a
 * semi-stable state and won't corrupt anything by itself. It's ok to let
 * the others continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
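/*
 * Rough timeline of the rendezvous (illustrative summary of mce_start()/
 * mce_end() below, not extra protocol):
 *
 *   all CPUs:  atomically increment mce_callin; the first caller is Monarch
 *   all CPUs:  spin in mce_start() until mce_callin == num_online_cpus()
 *   each CPU:  scans its banks when mce_executing reaches its callin order
 *   Monarch:   in mce_end(), waits for all Subjects, runs mce_reign(),
 *              then resets the global state so the next MCE starts clean
 */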
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU that detected it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (!m && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires a panic. Then the CPUs are run
 * through the handler in their entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int no_way_out, int *order)
{
	int nwo;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout) {
		*order = -1;
		return no_way_out;
	}

	atomic_add(no_way_out, &global_nwo);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			*order = -1;
			return no_way_out;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * Cache the global no_way_out state.
	 */
	nwo = atomic_read(&global_nwo);

	/*
	 * Monarch starts executing now, the others wait.
	 */
	if (*order == 1) {
		atomic_set(&mce_executing, 1);
		return nwo;
	}

	/*
	 * Now start the scanning loop one by one
	 * in the original callin order.
	 * This way when there are any shared banks it will
	 * be only seen by one CPU before it is cleared, avoiding duplicates.
	 */
	while (atomic_read(&mce_executing) < *order) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			*order = -1;
			return no_way_out;
		}
		ndelay(SPINUNIT);
	}
	return nwo;
}

/*
 * Synchronize between CPUs after the main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for the Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	__get_cpu_var(mce_exception_count)++;

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
		       18, SIGKILL) == NOTIFY_STOP)
		goto out;
	if (!banks)
		goto out;

	order = atomic_add_return(1, &mce_callin);
	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	no_way_out = mce_no_way_out(&m, &msg);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	barrier();

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	no_way_out = mce_start(no_way_out, &order);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Corrected errors are handled by machine_check_poll().
		 * Leave them alone, unless this panics.
		 */
		if ((m.status & MCI_STATUS_UC) == 0 && !no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		__set_bit(i, toclear);

		if (m.status & MCI_STATUS_EN) {
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble. If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 ||
				    m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		} else {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

		mce_get_rip(&m, regs);
		mce_log(&m);

		severity = mce_severity(&m, tolerant, NULL);
		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only the local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = final->ip && (final->cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS. Otherwise, panic if tolerance is low.
		 *
		 * force_sig() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check", final, msg);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
}
EXPORT_SYMBOL_GPL(do_machine_check);
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
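/*
 * Illustrative sketch (not compiled): the thermal interrupt handler is
 * expected to call the helper above roughly like this, after it has
 * rate-limited its own messages. The function name is hypothetical.
 */
#if 0
static void example_thermal_event(void)
{
	__u64 msr_val;

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	mce_log_therm_throt_event(msr_val);
}
#endif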
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mcheck_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(&current_cpu_data)) {
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(next_interval);
	if (mce_notify_user())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

	t->expires = jiffies + *n;
	add_timer(t);
}
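/*
 * With the defaults above, the per-CPU polling interval therefore adapts
 * between HZ/100 jiffies (10ms) while events keep arriving and
 * check_interval seconds (5 minutes) when the machine is quiet.
 */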
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_user(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &notify_user)) {
		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the work function
		 * is executed.
		 */
		if (trigger[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			printk(KERN_INFO "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_user);
/*
 * Initialize Machine Checks for a CPU.
 */
static int mce_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
		       MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!bank) {
		bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
		if (!bank)
			return -ENOMEM;
		memset(bank, 0xff, banks * sizeof(u64));
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	return 0;
}

static void mce_init(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		if (skip_bank_init(i))
			continue;
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
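/*
 * Per-bank note: bank[i] is all 1s by default (see mce_cap_init() above),
 * so the MC0_CTL+4*i write enables reporting of every error type in bank
 * i, and clearing MC0_STATUS discards any stale state left from before.
 */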
/* Add per CPU specific workarounds here */
static void mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * Disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&bank[4]);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave garbage in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			bank[0] = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * The SDM documents that on family 6 bank 0 should not be
		 * written because it aliases to another special BIOS
		 * controlled register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A)
			__set_bit(0, &dont_init_banks);

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
		    monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
}
static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (mce_p5_enabled())
			intel_p5_mcheck_init(c);
		break;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		break;
	}
}

static void mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void mce_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(next_interval);

	*n = check_interval * HZ;
	if (!*n)
		return;
	setup_timer(t, mcheck_timer, smp_processor_id());
	t->expires = round_jiffies(jiffies + *n);
	add_timer(t);
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	mce_ancient_init(c);

	if (!mce_available(c))
		return;

	if (mce_cap_init() < 0) {
		mce_disabled = 1;
		return;
	}
	mce_cpu_quirks(c);

	machine_check_vector = do_machine_check;

	mce_init();
	mce_cpu_features(c);
	mce_init_timer();
}
/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int	open_count;	/* #times opened */
static int	open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static DEFINE_MUTEX(mce_read_mutex);

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);

		return -EINVAL;
	}

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;

			while (!mcelog.entry[i].finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(mcelog.entry + i, 0,
					       sizeof(struct mce));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, mcelog.entry + i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);

	return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
	.open			= mce_open,
	.release		= mce_release,
	.read			= mce_read,
	.poll			= mce_poll,
	.unlocked_ioctl		= mce_ioctl,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);
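/*
 * Typical userspace consumer of the device above (illustrative sketch of
 * what a daemon like mcelog does, not kernel code; error handling omitted):
 * open /dev/mcelog, sleep in poll(), then read whole records.
 */
#if 0
	int fd = open("/dev/mcelog", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	struct mce records[MCE_LOG_LEN];

	while (poll(&pfd, 1, -1) > 0) {
		ssize_t len = read(fd, records, sizeof(records));
		/* decode len / sizeof(struct mce) records */
	}
#endif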
static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off		Disables machine check
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		mce_disabled = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = (str[0] == 'b');
	else if (isdigit(str[0])) {
		get_option(&str, &tolerant);
		if (*str == ',') {
			++str;
			get_option(&str, &monarch_timeout);
		}
	} else {
		printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
		       str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
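/*
 * Example (matching the parser above): booting with "mce=2,500" sets
 * tolerant to 2 and monarch_timeout to 500 usecs; "mce=nobootlog"
 * suppresses logging of machine check state left over from before boot.
 */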
/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (!skip_bank_init(i))
			wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
	}
	return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
	return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
	return mce_disable();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static int mce_resume(struct sys_device *dev)
{
	mce_init();
	mce_cpu_features(&current_cpu_data);

	return 0;
}

static void mce_cpu_restart(void *data)
{
	del_timer_sync(&__get_cpu_var(mce_timer));
	if (mce_available(&current_cpu_data))
		mce_init();
	mce_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

static struct sysdev_class mce_sysclass = {
	.suspend	= mce_suspend,
	.shutdown	= mce_shutdown,
	.resume		= mce_resume,
	.name		= "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_dev);
__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static struct sysdev_attribute *bank_attrs;

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
			 char *buf)
{
	u64 b = bank[attr - bank_attrs];

	return sprintf(buf, "%llx\n", b);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	bank[attr - bank_attrs] = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;
	int len;

	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');

	if (p)
		*p = 0;

	return len;
}

static ssize_t store_int_with_restart(struct sys_device *s,
				      struct sysdev_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = sysdev_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);

static struct sysdev_ext_attribute attr_check_interval = {
	_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
		     store_int_with_restart),
	&check_interval
};

static struct sysdev_attribute *mce_attrs[] = {
	&attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger,
	&attr_monarch_timeout.attr,
	NULL
};

static cpumask_var_t mce_dev_initialized;
/* Per CPU sysdev init. All of the CPUs still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(mce_dev, cpu).id	= cpu;
	per_cpu(mce_dev, cpu).cls	= &mce_sysclass;

	err = sysdev_register(&per_cpu(mce_dev, cpu));
	if (err)
		return err;

	for (i = 0; mce_attrs[i]; i++) {
		err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < banks; j++) {
		err = sysdev_create_file(&per_cpu(mce_dev, cpu),
					 &bank_attrs[j]);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_dev_initialized);

	return 0;
error2:
	while (--j >= 0)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[j]);
error:
	while (--i >= 0)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

	sysdev_unregister(&per_cpu(mce_dev, cpu));

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpumask_test_cpu(cpu, mce_dev_initialized))
		return;

	for (i = 0; mce_attrs[i]; i++)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

	for (i = 0; i < banks; i++)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);

	sysdev_unregister(&per_cpu(mce_dev, cpu));
	cpumask_clear_cpu(cpu, mce_dev_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(&current_cpu_data))
		return;
	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++) {
		if (!skip_bank_init(i))
			wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
	}
}

static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(&current_cpu_data))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++) {
		if (!skip_bank_init(i))
			wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		t->expires = round_jiffies(jiffies +
					   per_cpu(next_interval, cpu));
		add_timer_on(t, cpu);
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};
static __init int mce_init_banks(void)
{
	int i;

	bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
			     GFP_KERNEL);
	if (!bank_attrs)
		return -ENOMEM;

	for (i = 0; i < banks; i++) {
		struct sysdev_attribute *a = &bank_attrs[i];

		a->attr.name	= kasprintf(GFP_KERNEL, "bank%d", i);
		if (!a->attr.name)
			goto nomem;

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
	return 0;

nomem:
	while (--i >= 0)
		kfree(bank_attrs[i].attr.name);
	kfree(bank_attrs);
	bank_attrs = NULL;

	return -ENOMEM;
}

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);

	err = mce_init_banks();
	if (err)
		return err;

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);

	return err;
}

device_initcall(mce_init_device);
#else /* CONFIG_X86_OLD_MCE: */

int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */

/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled == 1)
		return;

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		amd_mcheck_init(c);
		break;

	case X86_VENDOR_INTEL:
		if (c->x86 == 5)
			intel_p5_mcheck_init(c);
		if (c->x86 == 6)
			intel_p6_mcheck_init(c);
		if (c->x86 == 15)
			intel_p4_mcheck_init(c);
		break;

	case X86_VENDOR_CENTAUR:
		if (c->x86 == 5)
			winchip_mcheck_init(c);
		break;

	default:
		break;
	}
	printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks);
}

static int __init mcheck_enable(char *str)
{
	mce_p5_enabled = 1;
	return 1;
}
__setup("mce", mcheck_enable);

#endif /* CONFIG_X86_OLD_MCE */
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);