/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where the kernel does not
 * reschedule on a CPU for 10 seconds or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>
static DEFINE_SPINLOCK(print_lock);
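
/*
 * Per-CPU watchdog state: touch_timestamp is the last "touch" time in
 * (approximate) seconds, print_timestamp rate-limits the report for
 * one lockup incident, and watchdog_task is that CPU's watchdog thread.
 */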
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;
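
/*
 * softlockup_thresh is in (approximate) seconds; kernels of this
 * vintage presumably also expose it as /proc/sys/kernel/softlockup_thresh.
 */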

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * soft-lockup occurs:
 */
unsigned int __read_mostly softlockup_panic =
				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;
	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
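
/*
 * Note on the shift above: cpu_clock() returns nanoseconds, and
 * dividing by 2^30 instead of 10^9 makes each "second" about 7% long
 * (1.074 real seconds).  That is close enough for lockup detection and
 * avoids a 64-bit divide in the timer path.
 */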

static void __touch_softlockup_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(touch_timestamp) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
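
/*
 * Long-running code that keeps a CPU busy on purpose (a driver polling
 * hardware with preemption disabled, say) is expected to call
 * touch_softlockup_watchdog() periodically so the detector stays
 * quiet; a sketch, with flash_ready() as a hypothetical helper:
 *
 *	while (!flash_ready(dev)) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */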

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
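
/*
 * The all-CPU variant is meant for code that stalls every CPU at once,
 * e.g. a sysrq handler dumping all task states with interrupts off;
 * without it, each CPU would report a spurious lockup afterwards.
 */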

/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	struct pt_regs *regs = get_irq_regs();
	unsigned long now;

	/* Is detection switched off? */
	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
		/* Be sure we don't false trigger if switched back on */
		per_cpu(touch_timestamp, this_cpu) = 0;
		return;
	}

	/* A zero timestamp asks us to re-initialize on this CPU: */
	if (touch_timestamp == 0) {
		__touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* report at most once per lockup incident */
	if ((print_timestamp >= touch_timestamp &&
			print_timestamp < (touch_timestamp + 1)) ||
			did_panic)
		return;

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		__touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp(this_cpu);

	/*
	 * Wake up the high-prio watchdog task twice per
	 * threshold timespan.
	 */
	if (now > touch_timestamp + softlockup_thresh/2)
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable delays: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, task_pid_nr(current));
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();
	spin_unlock(&print_lock);

	if (softlockup_panic)
		panic("softlockup: hung tasks");
}
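
/*
 * Timeline with the default softlockup_thresh of 60: the watchdog
 * thread is woken once the delay exceeds 30 "seconds" (see
 * get_timestamp() for the units), and the report above fires past 60,
 * limited to one printout per incident by print_timestamp until the
 * watchdog is touched again.
 */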

/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;
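
/*
 * These knobs are runtime-tunable: the warning text below spells out
 * /proc/sys/kernel/hung_task_timeout_secs, and the check count and
 * warning budget presumably sit alongside it with matching names.
 */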

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;
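
/*
 * check_cpu is (re-)elected from the online map by cpu_callback()
 * below as CPUs come online and go offline.
 */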

static void check_hung_task(struct task_struct *t, unsigned long now)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (t->flags & PF_FROZEN)
		return;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if ((long)(now - t->last_switch_timestamp) <
	    sysctl_hung_task_timeout_secs)
		return;
	/* once the warning budget is exhausted, stay quiet: */
	if (!sysctl_hung_task_warnings)
		return;
	sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for more than 2 minutes,
	 * complain:
	 */
	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
			"%ld seconds.\n", t->comm, t->pid,
			sysctl_hung_task_timeout_secs);
	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
	__debug_show_held_locks(t);

	t->last_switch_timestamp = now;
	touch_nmi_watchdog();

	if (softlockup_panic)
		panic("softlockup: blocked tasks");
}

/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long now = get_timestamp(this_cpu);
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if ((tainted & TAINT_DIE) || did_panic)
		return;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!--max_count)
			goto unlock;
		if (t->state & TASK_UNINTERRUPTIBLE)
			check_hung_task(t, now);
	} while_each_thread(g, t);
 unlock:
	read_unlock(&tasklist_lock);
}
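
/*
 * Note: the bitwise state test above also matches TASK_KILLABLE
 * sleepers (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE), e.g. tasks waiting
 * on NFS; later kernels compare with "==" to skip those.
 */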

/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int this_cpu = (long)__bind_cpu;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_softlockup_watchdog();

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 60 seconds then the
	 * debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		__touch_softlockup_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		if (this_cpu == check_cpu) {
			if (sysctl_hung_task_timeout_secs)
				check_hung_uninterruptible_tasks(this_cpu);
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
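
/*
 * Note that the thread does not sleep on a timer of its own: it parks
 * in TASK_INTERRUPTIBLE and relies on softlockup_tick() waking it at
 * half the threshold, so "once per second" above really means
 * "whenever the tick path wakes it".
 */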

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		check_cpu = any_online_cpu(cpu_online_map);
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (hotcpu == check_cpu) {
			/* Pick a CPU that will stay online: */
			cpumask_t temp_cpu_online_map = cpu_online_map;

			cpu_clear(hotcpu, temp_cpu_online_map);
			check_cpu = any_online_cpu(temp_cpu_online_map);
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}
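
/*
 * Presumably called once from early init (init/main.c in kernels of
 * this vintage), before secondary CPUs boot; the registered notifier
 * then spawns a watchdog thread for each CPU as it comes online.
 */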