cpuidle: stop depending on pm_idle
author     Len Brown <len.brown@intel.com>
           Fri, 1 Apr 2011 23:34:59 +0000 (19:34 -0400)
committer  Len Brown <len.brown@intel.com>
           Wed, 3 Aug 2011 23:06:37 +0000 (19:06 -0400)
cpuidle users should call cpuidle_idle_call() directly
rather than via the (pm_idle)() function pointer.

An architecture may choose to continue using (pm_idle)(),
but cpuidle need not depend on it:

  my_arch_cpu_idle()
	...
	if (cpuidle_idle_call())
		pm_idle();
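
A fuller sketch of that contract, for illustration only (the loop below is
condensed from the arch idle loops touched by this patch; the nohz and
preempt calls are examples that vary per architecture, and nothing here is
added by the patch itself):

	#include <linux/cpuidle.h>	/* cpuidle_idle_call() */
	#include <linux/tick.h>
	#include <linux/sched.h>

	void cpu_idle(void)
	{
		/* endless idle loop with no priority at all */
		while (1) {
			tick_nohz_stop_sched_tick(1);
			while (!need_resched()) {
				local_irq_disable();
				stop_critical_timings();
				/* non-zero: cpuidle is off, not yet
				 * initialized, or has no enabled device
				 * on this CPU */
				if (cpuidle_idle_call())
					pm_idle();	/* legacy arch fallback */
				start_critical_timings();
			}
			tick_nohz_restart_sched_tick();
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}

Either way the fallback path is unchanged: pm_idle() (or the architecture's
default idle routine behind it) still runs whenever cpuidle declines.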

cc: Kevin Hilman <khilman@deeprootsystems.com>
cc: Paul Mundt <lethal@linux-sh.org>
cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
arch/arm/kernel/process.c
arch/sh/kernel/idle.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
drivers/cpuidle/cpuidle.c
include/linux/cpuidle.h

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5e1e541..d7ee0d4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,6 +30,7 @@
 #include <linux/uaccess.h>
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/cpuidle.h>
 
 #include <asm/cacheflush.h>
 #include <asm/leds.h>
@@ -196,7 +197,8 @@ void cpu_idle(void)
                                cpu_relax();
                        } else {
                                stop_critical_timings();
-                               pm_idle();
+                               if (cpuidle_idle_call())
+                                       pm_idle();
                                start_critical_timings();
                                /*
                                 * This will eventually be removed - pm_idle
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 425d604..9c7099e 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
 #include <linux/thread_info.h>
 #include <linux/irqflags.h>
 #include <linux/smp.h>
+#include <linux/cpuidle.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
 #include <asm/smp.h>
 
-void (*pm_idle)(void) = NULL;
+static void (*pm_idle)(void);
 
 static int hlt_counter;
 
@@ -100,7 +101,8 @@ void cpu_idle(void)
                        local_irq_disable();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
-                       pm_idle();
+                       if (cpuidle_idle_call())
+                               pm_idle();
                        /*
                         * Sanity check to ensure that pm_idle() returns
                         * with IRQs enabled
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a3d0dc5..7a3b651 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -109,7 +110,8 @@ void cpu_idle(void)
                        local_irq_disable();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
-                       pm_idle();
+                       if (cpuidle_idle_call())
+                               pm_idle();
                        start_critical_timings();
                }
                tick_nohz_restart_sched_tick();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca6f7ab..f693e44 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
+#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -136,7 +137,8 @@ void cpu_idle(void)
                        enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
-                       pm_idle();
+                       if (cpuidle_idle_call())
+                               pm_idle();
                        start_critical_timings();
 
                        /* In many cases the interrupt that ended idle
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 041df0b..d4c5423 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,10 +25,10 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 
 DEFINE_MUTEX(cpuidle_lock);
 LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
 
 static int enabled_devices;
 static int off __read_mostly;
+static int initialized __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -56,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
  * cpuidle_idle_call - the main idle loop
  *
  * NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
  */
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
 {
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;
 
+       if (off)
+               return -ENODEV;
+
+       if (!initialized)
+               return -ENODEV;
+
        /* check if the device is ready */
-       if (!dev || !dev->enabled) {
-               if (pm_idle_old)
-                       pm_idle_old();
-               else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
-                       default_idle();
-#else
-                       local_irq_enable();
-#endif
-               return;
-       }
+       if (!dev || !dev->enabled)
+               return -EBUSY;
 
 #if 0
        /* shows regressions, re-enable for 2.6.29 */
@@ -99,7 +97,7 @@ static void cpuidle_idle_call(void)
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched()) {
                local_irq_enable();
-               return;
+               return 0;
        }
 
        target_state = &dev->states[next_state];
@@ -124,6 +122,8 @@ static void cpuidle_idle_call(void)
        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
+
+       return 0;
 }
 
 /**
@@ -131,10 +131,10 @@ static void cpuidle_idle_call(void)
  */
 void cpuidle_install_idle_handler(void)
 {
-       if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+       if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
-               pm_idle = cpuidle_idle_call;
+               initialized = 1;
        }
 }
 
@@ -143,8 +143,8 @@ void cpuidle_install_idle_handler(void)
  */
 void cpuidle_uninstall_idle_handler(void)
 {
-       if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
-               pm_idle = pm_idle_old;
+       if (enabled_devices) {
+               initialized = 0;
                cpuidle_kick_cpus();
        }
 }
@@ -440,8 +440,6 @@ static int __init cpuidle_init(void)
        if (cpuidle_disabled())
                return -ENODEV;
 
-       pm_idle_old = pm_idle;
-
        ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
        if (ret)
                return ret;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b89f67d..b51629e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -123,6 +123,7 @@ struct cpuidle_driver {
 
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
+extern int cpuidle_idle_call(void);
 
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 struct cpuidle_driver *cpuidle_get_driver(void);
@@ -137,6 +138,7 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 
 #else
 static inline void disable_cpuidle(void) { }
+static inline int cpuidle_idle_call(void) { return -ENODEV; }
 
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }