Merge branch 'linus' into cpus4096
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2011ad8..b21ba7e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
+/*
+ * Represents all CPUs present in the system.
+ * In systems capable of hotplug, this map may grow dynamically as
+ * new CPUs are detected in the system by any platform-specific
+ * method, such as ACPI.
+ */
+cpumask_t cpu_present_map __read_mostly;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+
+/*
+ * Represents all CPUs that are currently online.
+ */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_online_map);
+
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
+
+#else /* CONFIG_SMP */
+
 /* Serializes the updates to cpu_online_map, cpu_present_map */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
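
A minimal sketch of how the maps declared above are consulted in practice;
the cpumask accessors are the standard helpers of this era, and
report_cpu_maps() is a hypothetical example function:

    /* Hypothetical example: consulting the global CPU maps. */
    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    static void report_cpu_maps(void)
    {
            int cpu;

            for_each_present_cpu(cpu)       /* walks bits set in cpu_present_map */
                    printk(KERN_INFO "CPU%d is present\n", cpu);

            /* On !CONFIG_SMP kernels the stubs above make this trivially true. */
            if (cpu_online(0))
                    printk(KERN_INFO "boot CPU is online\n");
    }
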
@@ -33,17 +55,13 @@ static struct {
         * an ongoing cpu hotplug operation.
         */
        int refcount;
-       wait_queue_head_t writer_queue;
 } cpu_hotplug;
 
-#define writer_exists() (cpu_hotplug.active_writer != NULL)
-
 void __init cpu_hotplug_init(void)
 {
        cpu_hotplug.active_writer = NULL;
        mutex_init(&cpu_hotplug.lock);
        cpu_hotplug.refcount = 0;
-       init_waitqueue_head(&cpu_hotplug.writer_queue);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,11 +83,8 @@ void put_online_cpus(void)
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
-       cpu_hotplug.refcount--;
-
-       if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
-               wake_up(&cpu_hotplug.writer_queue);
-
+       if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+               wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
 
 }
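
The wake_up_process() above pairs with the reader side, get_online_cpus();
a hotplug-safe traversal of the online map therefore looks roughly like
this sketch (do_something() is a hypothetical callee):

    /* Sketch: pinning the online map across an iteration. */
    static void walk_online_cpus(void)
    {
            int cpu;

            get_online_cpus();              /* cpu_hotplug.lock, refcount++ */
            for_each_online_cpu(cpu)
                    do_something(cpu);      /* hypothetical per-CPU work */
            put_online_cpus();              /* last reader wakes the writer */
    }
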
@@ -98,8 +113,8 @@ void cpu_maps_update_done(void)
  * Note that during a cpu-hotplug operation, the new readers, if any,
  * will be blocked by the cpu_hotplug.lock
  *
- * Since cpu_maps_update_begin is always called after invoking
- * cpu_maps_update_begin, we can be sure that only one writer is active.
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
  *
  * Note that theoretically, there is a possibility of a livelock:
  * - Refcount goes to zero, last reader wakes up the sleeping
@@ -115,19 +130,16 @@ void cpu_maps_update_done(void)
  */
 static void cpu_hotplug_begin(void)
 {
-       DECLARE_WAITQUEUE(wait, current);
-
-       mutex_lock(&cpu_hotplug.lock);
-
        cpu_hotplug.active_writer = current;
-       add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
-       while (cpu_hotplug.refcount) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+
+       for (;;) {
+               mutex_lock(&cpu_hotplug.lock);
+               if (likely(!cpu_hotplug.refcount))
+                       break;
+               __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
-               mutex_lock(&cpu_hotplug.lock);
        }
-       remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
 }
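
The loop above cannot miss a wakeup: the writer marks itself
TASK_UNINTERRUPTIBLE before dropping the mutex, so a wake_up_process()
from put_online_cpus() in the window before schedule() merely makes the
task runnable again and schedule() returns immediately. The reader entry
point it races against, get_online_cpus() (earlier in this file, not part
of this hunk), is in essence:

    /* Reader side, in essence: take the lock, bump the refcount. */
    void get_online_cpus(void)
    {
            might_sleep();
            if (cpu_hotplug.active_writer == current)
                    return;         /* the writer may re-enter the read side */
            mutex_lock(&cpu_hotplug.lock);
            cpu_hotplug.refcount++;
            mutex_unlock(&cpu_hotplug.lock);
    }
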
 
 static void cpu_hotplug_done(void)
@@ -136,7 +148,7 @@ static void cpu_hotplug_done(void)
        mutex_unlock(&cpu_hotplug.lock);
 }
 /* Need to know about CPUs going up/down? */
-int __cpuinit register_cpu_notifier(struct notifier_block *nb)
+int __ref register_cpu_notifier(struct notifier_block *nb)
 {
        int ret;
        cpu_maps_update_begin();
@@ -149,7 +161,7 @@ int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 
 EXPORT_SYMBOL(register_cpu_notifier);
 
-void unregister_cpu_notifier(struct notifier_block *nb)
+void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
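
For reference, a typical client of this notifier API in kernels of this
vintage; my_cpu_callback and my_cpu_notifier are hypothetical names:

    /* Hypothetical client: log CPUs as they come online. */
    static int __cpuinit my_cpu_callback(struct notifier_block *nb,
                                         unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            if (action == CPU_ONLINE)
                    printk(KERN_INFO "CPU%u is now online\n", cpu);
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_notifier __cpuinitdata = {
            .notifier_call = my_cpu_callback,
    };

    /* From init code: register_cpu_notifier(&my_cpu_notifier); */
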
@@ -180,7 +192,7 @@ struct take_cpu_down_param {
 };
 
 /* Take this CPU down. */
-static int take_cpu_down(void *_param)
+static int __ref take_cpu_down(void *_param)
 {
        struct take_cpu_down_param *param = _param;
        int err;
@@ -199,7 +211,7 @@ static int take_cpu_down(void *_param)
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu, int tasks_frozen)
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
        struct task_struct *p;
@@ -225,7 +237,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
-                               __FUNCTION__, cpu);
+                               __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }
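
The nr_calls bookkeeping visible here implements partial rollback: only
the callbacks that already saw CPU_DOWN_PREPARE receive CPU_DOWN_FAILED.
The prepare side (earlier in _cpu_down(), outside this hunk) is in
essence:

    /* Prepare step whose failure the hunk above rolls back. */
    err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                    hcpu, -1, &nr_calls);
    if (err == NOTIFY_BAD) {
            nr_calls--;     /* the failing callback must not see DOWN_FAILED */
            /* ... falls through to the CPU_DOWN_FAILED call shown above */
    }
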
@@ -274,7 +286,7 @@ out_release:
        return err;
 }
 
-int cpu_down(unsigned int cpu)
+int __ref cpu_down(unsigned int cpu)
 {
        int err = 0;
 
@@ -305,7 +317,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
-                               __FUNCTION__, cpu);
+                               __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }
@@ -400,7 +412,7 @@ void __ref enable_nonboot_cpus(void)
                goto out;
 
        printk("Enabling non-boot CPUs ...\n");
-       for_each_cpu_mask(cpu, frozen_cpus) {
+       for_each_cpu_mask_nr(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
@@ -413,3 +425,5 @@ out:
        cpu_maps_update_done();
 }
 #endif /* CONFIG_PM_SLEEP_SMP */
+
+#endif /* CONFIG_SMP */