/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * This code detects hard lockups: incidents where, on a CPU,
 * the kernel does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
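/*
 * Boot-line usage, per the strncmp checks above: "nmi_watchdog=panic"
 * panics on a hard lockup, "nmi_watchdog=nopanic" only warns, and
 * "nmi_watchdog=0" disables the watchdog entirely.
 */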
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
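/*
 * Note: 2^30 ns is 1.074 s, so each returned "second" is about 7%
 * long; the shift trades that small error for avoiding a 64-bit
 * divide on every call.
 */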
static u64 get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer 5 chances to
	 * increment before the hardlockup detector generates
	 * a warning
	 */
	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
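/*
 * Worked example with the default watchdog_thresh of 10: the soft
 * threshold is 10 * 2 = 20s, so the sample period is 20 * (1e9 / 5) ns
 * = 4s, giving the hrtimer five chances to fire within one soft-lockup
 * window.
 */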
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}
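/*
 * Note: a watchdog_touch_ts of 0 is a sentinel meaning "just touched":
 * watchdog_timer_fn() skips the softlockup check once and re-arms the
 * timestamp on its next tick.
 */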
void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
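/*
 * How the hard-lockup check works: watchdog_timer_fn() increments
 * hrtimer_interrupts on every timer tick, while the perf NMI fires
 * roughly once per watchdog_thresh seconds (its sample period is
 * derived from watchdog_thresh via hw_nmi_get_sample_period()). If
 * two consecutive NMIs observe the same hrtimer_interrupts value,
 * timer interrupts are no longer being serviced and the cpu is
 * considered hard-locked.
 */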
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing. The timer interrupt should have fired multiple
	 * times before we overflowed. If it hasn't, this is a good
	 * indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to indicate it
	 * is getting cpu time. If it hasn't, this is a good indication
	 * some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
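/*
 * How the soft-lockup check above plays out: the per-cpu watchdog
 * thread below runs at MAX_RT_PRIO-1 and refreshes watchdog_touch_ts
 * every time it gets scheduled. If the hrtimer callback finds the
 * timestamp more than 2 * watchdog_thresh seconds old, even the
 * highest-priority task has been starved of cpu time, so something is
 * hogging the cpu.
 */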
/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly each time the hrtimer wakes us to reset the
	 * softlockup timestamp. If this gets delayed for more than
	 * 2 * watchdog_thresh seconds then the debug-printout triggers
	 * in watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	param.sched_priority = 0;
	sched_setscheduler(current, SCHED_NORMAL, &param);
	return 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);
	if (!IS_ERR(event)) {
		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
	else
		printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
}
static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err = 0;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);
	/* Regardless of err above, fall through and start softlockup */

	/* create the watchdog thread */
	if (!p) {
		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
			if (!err) {
				/* if hardlockup hasn't already set this */
				err = PTR_ERR(p);
				/* and disable the perf event */
				watchdog_nmi_disable(cpu);
			}
			goto out;
		}
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}
out:
	return err;
}
static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
		if (!watchdog_enable(cpu))
			/* if any cpu succeeds, watchdog is considered
			   enabled for the system */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return ret;
}
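/*
 * Usage example (both knobs named above route through this handler):
 *   echo 0  > /proc/sys/kernel/nmi_watchdog     - stop the watchdog everywhere
 *   echo 20 > /proc/sys/kernel/watchdog_thresh  - re-arm with a 20s hard /
 *                                                 40s soft lockup threshold
 */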
#endif /* CONFIG_SYSCTL */
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enabled)
			watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * hardlockup and softlockup are not important enough
	 * to block cpu bring up. Just always succeed and
	 * rely on printk output to flag problems.
	 */
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
}