Merge git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index c1d7655..c828c23 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
        int this_cpu = raw_smp_processor_id();
 
        __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+       __raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
        unsigned long now;
 
        if (touch_timestamp == 0) {
-               touch_softlockup_watchdog();
+               __touch_softlockup_watchdog();
                return;
        }
 
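The two hunks above split the touch API in two: the exported touch_softlockup_watchdog() now only stores 0, a sentinel meaning "re-initialize on the next tick", so it stays a single cheap store that is safe to call from any context, while the clock read moves into the internal __touch_softlockup_watchdog() and into softlockup_tick(), which re-arms the stamp when it sees the sentinel. A minimal userspace sketch of the same pattern, with hypothetical names and clock_gettime() standing in for cpu_clock():

    #include <stdio.h>
    #include <time.h>

    static unsigned long touch_timestamp;   /* 0 == "re-init on next tick" */

    static unsigned long get_timestamp(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (unsigned long)ts.tv_sec;
    }

    /* The exported touch: one plain store, no clock read, so it is
     * safe wherever it gets called from. */
    static void touch_watchdog(void)
    {
            touch_timestamp = 0;
    }

    /* The periodic tick: re-arms on the sentinel, otherwise compares. */
    static void tick(unsigned long thresh)
    {
            unsigned long now;

            if (touch_timestamp == 0) {
                    touch_timestamp = get_timestamp();
                    return;
            }

            now = get_timestamp();
            if (now > touch_timestamp + thresh)
                    printf("suspected lockup: %lus without a touch\n",
                           now - touch_timestamp);
    }

    int main(void)
    {
            touch_watchdog();
            tick(10);       /* first tick after a touch only re-arms */
            tick(10);       /* later ticks do the real comparison */
            return 0;
    }

The payoff of the split is that anything potentially unsafe in odd calling contexts happens only in tick context; callers of the touch path pay for one store.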
@@ -95,12 +100,16 @@ void softlockup_tick(void)
 
        /* do not print during early bootup: */
        if (unlikely(system_state != SYSTEM_RUNNING)) {
-               touch_softlockup_watchdog();
+               __touch_softlockup_watchdog();
                return;
        }
 
        now = get_timestamp(this_cpu);
 
+       /* Wake up the high-prio watchdog task every second: */
+       if (now > (touch_timestamp + 1))
+               wake_up_process(per_cpu(watchdog_task, this_cpu));
+
        /* Warn about unreasonable delays: */
        if (now <= (touch_timestamp + softlockup_thresh))
                return;
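The new wake-up test reads as "once per second" because of the timestamp units: the first hunk shows get_timestamp() returning cpu_clock() >> 30, and cpu_clock() counts nanoseconds, so one timestamp unit is 2^30 ns, about 1.07 s. A quick check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* One unit of get_timestamp() is 2^30 ns, per the first hunk. */
            unsigned long long unit_ns = 1ULL << 30;

            printf("1 timestamp unit = %llu ns (~%.3f s)\n",
                   unit_ns, unit_ns / 1e9);
            return 0;
    }

So `now > (touch_timestamp + 1)` becomes true roughly a second after the stamp was last written, and from then on each tick nudges the per-CPU watchdog task awake via wake_up_process().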
@@ -191,11 +200,11 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
        read_lock(&tasklist_lock);
        do_each_thread(g, t) {
                if (!--max_count)
-                       break;
+                       goto unlock;
                if (t->state & TASK_UNINTERRUPTIBLE)
                        check_hung_task(t, now);
        } while_each_thread(g, t);
-
+ unlock:
        read_unlock(&tasklist_lock);
 }
 
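The break-to-goto change above is not cosmetic. In kernels of this vintage, do_each_thread()/while_each_thread() expand to a nested loop, roughly for (...) do { ... } while (...), so a `break` only leaves the inner do/while and the walk resumes with the next thread group; worse, the already-zero max_count keeps decrementing, so the cap is lost. Only `goto unlock` leaves both levels while still reaching read_unlock(). A plain nested-loop demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            int g, t, visited;

            visited = 0;
            for (g = 0; g < 3; g++)
                    for (t = 0; t < 3; t++) {
                            if (++visited == 2)
                                    break;          /* inner loop only */
                    }
            printf("break: visited %d (outer loop kept going)\n", visited);

            visited = 0;
            for (g = 0; g < 3; g++)
                    for (t = 0; t < 3; t++) {
                            if (++visited == 2)
                                    goto out;       /* leaves both loops */
                    }
    out:
            printf("goto:  visited %d (stopped at the cap)\n", visited);
            return 0;
    }

The first walk visits 8 cells despite the cap of 2; the second stops at exactly 2, which is the behavior the hunk restores for check_hung_uninterruptible_tasks().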
@@ -210,23 +219,29 @@ static int watchdog(void *__bind_cpu)
        sched_setscheduler(current, SCHED_FIFO, &param);
 
        /* initialize timestamp */
-       touch_softlockup_watchdog();
+       __touch_softlockup_watchdog();
 
+       set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 60 seconds then the
         * debug-printout triggers in softlockup_tick().
         */
        while (!kthread_should_stop()) {
-               touch_softlockup_watchdog();
-               msleep_interruptible(10000);
+               __touch_softlockup_watchdog();
+               schedule();
+
+               if (kthread_should_stop())
+                       break;
 
-               if (this_cpu != check_cpu)
-                       continue;
+               if (this_cpu == check_cpu) {
+                       if (sysctl_hung_task_timeout_secs)
+                               check_hung_uninterruptible_tasks(this_cpu);
+               }
 
-               if (sysctl_hung_task_timeout_secs)
-                       check_hung_uninterruptible_tasks(this_cpu);
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
 
        return 0;
 }
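The rewritten loop follows the standard kthread sleep discipline: mark yourself TASK_INTERRUPTIBLE before re-checking kthread_should_stop(), so a wake_up_process() or kthread_stop() arriving between the check and schedule() flips the task back to runnable and schedule() returns promptly instead of sleeping through the event; the final __set_current_state(TASK_RUNNING) undoes the last marking on the way out. A self-contained pthread analogue of the same lost-wakeup-safe handshake, with hypothetical names (in pthreads the mutex/condvar pair plays the role the task-state write plays in the kernel):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool kicked, stop;

    static void *watchdog_thread(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!stop) {
                    /* Re-check the condition under the lock before
                     * sleeping, so a kick sent "in between" is never
                     * lost -- the analogue of setting the task state
                     * before calling schedule(). */
                    while (!kicked && !stop)
                            pthread_cond_wait(&cond, &lock);
                    kicked = false;
                    if (!stop)
                            printf("watchdog: touched timestamp\n");
            }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    /* The "tick" side: the counterpart of wake_up_process(). */
    static void kick(bool quit)
    {
            pthread_mutex_lock(&lock);
            kicked = true;
            stop = quit;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, watchdog_thread, NULL);
            kick(false);            /* one wakeup, one touch */
            usleep(100000);
            kick(true);             /* the kthread_stop() analogue */
            pthread_join(t, NULL);
            return 0;
    }

Compared with the old msleep_interruptible(10000) body, the thread now sleeps until the tick wakes it, which is what lets the tick-side "every second" test above drive the cadence.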
@@ -259,13 +274,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                wake_up_process(per_cpu(watchdog_task, hotcpu));
                break;
 #ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               if (!per_cpu(watchdog_task, hotcpu))
-                       break;
-               /* Unbind so it can run.  Fall thru. */
-               kthread_bind(per_cpu(watchdog_task, hotcpu),
-                            any_online_cpu(cpu_online_map));
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                if (hotcpu == check_cpu) {
@@ -275,6 +283,14 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                        check_cpu = any_online_cpu(temp_cpu_online_map);
                }
                break;
+
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+               if (!per_cpu(watchdog_task, hotcpu))
+                       break;
+               /* Unbind so it can run.  Fall thru. */
+               kthread_bind(per_cpu(watchdog_task, hotcpu),
+                            any_online_cpu(cpu_online_map));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                p = per_cpu(watchdog_task, hotcpu);
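The last two hunks move the CPU_UP_CANCELED pair rather than rewrite it: since it ends without a break, its behavior is defined by whichever case follows, so after the move an aborted bring-up unbinds the stranded kthread and then shares the CPU_DEAD teardown instead of falling into the CPU_DOWN_PREPARE check_cpu bookkeeping. A minimal sketch of that switch-fallthrough shape, with a hypothetical subset of the notifier events:

    #include <stdio.h>

    enum cpu_event { UP_CANCELED, DOWN_PREPARE, DEAD };

    static void notifier(enum cpu_event e)
    {
            switch (e) {
            case DOWN_PREPARE:
                    printf("DOWN_PREPARE: pick a new check_cpu\n");
                    break;

            case UP_CANCELED:
                    printf("UP_CANCELED: unbind the stuck kthread\n");
                    /* fall through: share the DEAD teardown below */
            case DEAD:
                    printf("DEAD: stop and release the watchdog task\n");
                    break;
            }
    }

    int main(void)
    {
            notifier(UP_CANCELED);  /* unbind, then the DEAD teardown */
            notifier(DEAD);         /* teardown only */
            return 0;
    }

Reordering the cases is therefore a behavioral change expressed purely through placement, which is why the "Fall thru." comment travels with the case labels.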