kernel/watchdog.c
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * This code detects hard lockups: incidents where a CPU's kernel does
 * not respond to anything except NMIs.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft-lockups can have false positives under extreme conditions, so we
 * generally want a higher threshold for soft lockups than for hard
 * lockups.  The two are therefore coupled with a fixed factor: the
 * soft-lockup threshold is twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
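
/*
 * Worked example with the defaults above: watchdog_thresh = 10, so
 * is_softlockup() below only reports a stall once a CPU has gone
 * 2 * 10 = 20 seconds without the watchdog thread being scheduled.
 */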

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
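
/*
 * Example of the shift: cpu_clock() returns nanoseconds, and
 * 60 * NSEC_PER_SEC >> 30 == 55, so 60 wall-clock seconds read as
 * roughly 55 "seconds" here.  The ~7% undercount is harmless because
 * the touch timestamp and the comparison in is_softlockup() use the
 * same units.
 */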

static unsigned long get_sample_period(void)
{
        /*
         * Convert the softlockup threshold (2 * watchdog_thresh) from
         * seconds to ns.  The divide by 5 gives the hrtimer five
         * chances to increment before the hardlockup detector
         * generates a warning.
         */
        return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}
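
/*
 * With the defaults this works out to 20 * (NSEC_PER_SEC / 5) ns, i.e.
 * the per-cpu hrtimer fires every 4 seconds, five times per 20-second
 * soft-lockup window.
 */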

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = smp_processor_id();

        __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
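
/*
 * A touch_ts of 0 is a sentinel rather than a real timestamp:
 * watchdog_timer_fn() checks for it and simply re-initializes the
 * timestamp instead of running the soft-lockup check.
 */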

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly.  Do we care if a 0 races with a
         * timestamp?  All it means is that the softlockup check starts
         * one cycle later.
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (!per_cpu(watchdog_nmi_touch, cpu))
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
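
/*
 * The sync flag set above is consumed in watchdog_timer_fn(): when the
 * timestamp was touched this way, sched_clock_tick() is called to bring
 * the scheduler clock up to date before the timestamp is
 * re-initialized.
 */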

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp(smp_processor_id());

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
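
/*
 * Note the return convention: 0 means "no lockup", while a non-zero
 * return is the stall duration in (approximate) seconds, which
 * watchdog_timer_fn() prints directly in its "stuck for %us" warning.
 */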

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void __weak hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr) { }

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
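
/*
 * The event is created .disabled so that watchdog_nmi_enable() can set
 * the sample period before switching it on explicitly; .pinned keeps
 * the cycle counter on the PMU so it is never multiplexed out by other
 * perf events.
 */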

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch)) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing.  The timer interrupt should have
         * fired multiple times before we overflowed.  If it hasn't
         * then this is a good indication the cpu is stuck
         */
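        /*
         * Concretely: watchdog_timer_fn() increments hrtimer_interrupts
         * on every timer tick, and is_hardlockup() compares that counter
         * with the snapshot saved at the previous NMI.  An unchanged
         * counter means not even the hrtimer has run for an entire perf
         * period, i.e. interrupts have been blocked the whole time.
         */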
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn))
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
}

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn))
                        return HRTIMER_RESTART;

                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
        static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        __touch_watchdog();

        /* kick off the timer for the hardlockup detector */
        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly each time the hrtimer wakes us to reset the
         * softlockup timestamp.  If this gets delayed for longer than
         * the softlockup threshold (2 * watchdog_thresh seconds), the
         * debug-printout triggers in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
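
/*
 * The loop above is the usual kthread sleep pattern: the task marks
 * itself TASK_INTERRUPTIBLE before schedule(), and the hrtimer
 * callback's wake_up_process() makes it runnable again.  Setting the
 * state before re-checking kthread_should_stop() avoids missing a
 * wakeup that races with kthread_stop().
 */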

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
        hw_nmi_watchdog_set_attr(wd_attr);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback, NULL);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
        else
                printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        int err = 0;

        /* enable the perf event */
        err = watchdog_nmi_enable(cpu);

        /* Regardless of err above, fall through and start softlockup */

        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu,
                                   "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
                        if (!err) {
                                /* if hardlockup hasn't already set this */
                                err = PTR_ERR(p);
                                /* and disable the perf event */
                                watchdog_nmi_disable(cpu);
                        }
                        goto out;
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

out:
        return err;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}

static void watchdog_enable_all_cpus(void)
{
        int cpu;

        watchdog_enabled = 0;

        for_each_online_cpu(cpu)
                if (!watchdog_enable(cpu))
                        /* if any cpu succeeds, watchdog is considered
                           enabled for the system */
                        watchdog_enabled = 1;

        if (!watchdog_enabled)
                printk(KERN_ERR "watchdog: failed to be enabled on any cpu\n");
}

static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret || !write)
                goto out;

        if (watchdog_enabled && watchdog_thresh)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

out:
        return ret;
}
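
/*
 * Usage sketch (the ctl_table wiring itself lives in kernel/sysctl.c):
 * writing 0 to either knob disables the watchdog on all online cpus;
 * re-enabling goes through nmi_watchdog, since
 * watchdog_disable_all_cpus() clears watchdog_enabled:
 *
 *   echo 0 > /proc/sys/kernel/nmi_watchdog    # tear down everywhere
 *   echo 1 > /proc/sys/kernel/nmi_watchdog    # bring back up
 */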
#endif /* CONFIG_SYSCTL */

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enabled)
                        watchdog_enable(hotcpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }

        /*
         * hardlockup and softlockup are not important enough
         * to block cpu bring up.  Just always succeed and
         * rely on printk output to flag problems.
         */
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(notifier_to_errno(err));

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
}
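
/*
 * Boot flow, for reference: lockup_detector_init() runs the notifier
 * callback by hand for the boot cpu (prepare + online), then registers
 * cpu_nfb so that secondary cpus get the same treatment through the
 * hotplug notifications handled in cpu_callback() above.
 */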