Merge branch 'release' of git://lm-sensors.org/kernel/mhoffman/hwmon-2.6
[pandora-kernel.git] / arch / powerpc / kernel / smp.c
index 5191b46..5337ca7 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
+#include <asm/cputhreads.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
@@ -171,12 +174,8 @@ void smp_send_stop(void)
        smp_call_function(stop_this_cpu, NULL, 0);
 }
 
-extern struct gettimeofday_struct do_gtod;
-
 struct thread_info *current_set[NR_CPUS];
 
-DECLARE_PER_CPU(unsigned int, pvr);
-
 static void __devinit smp_store_cpu_info(int id)
 {
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
@@ -232,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
        BUG_ON(smp_processor_id() != boot_cpuid);
 
        cpu_set(boot_cpuid, cpu_online_map);
+       cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
+       cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
 #endif
@@ -379,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
        return 0;
 }
 
+/* Return the value of the reg property corresponding to the given
+ * logical cpu.
+ */
+int cpu_to_core_id(int cpu)
+{
+       struct device_node *np;
+       const int *reg;
+       int id = -1;
+
+       np = of_get_cpu_node(cpu, NULL);
+       if (!np)
+               goto out;
+
+       reg = of_get_property(np, "reg", NULL);
+       if (!reg)
+               goto out;
+
+       id = *reg;
+out:
+       of_node_put(np);
+       return id;
+}
+
+/* Must be called when no change can occur to cpu_present_map,
+ * i.e. during cpu online or offline.
+ */
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+       struct device_node *np;
+       const phandle *php;
+       phandle ph;
+
+       if (!cpu_present(cpu))
+               return NULL;
+
+       np = of_get_cpu_node(cpu, NULL);
+       if (np == NULL)
+               return NULL;
+
+       php = of_get_property(np, "l2-cache", NULL);
+       ph = (php != NULL) ? *php : 0;
+       of_node_put(np);
+       if (ph == 0)
+               return NULL;
+
+       return of_find_node_by_phandle(ph);
+}
 
 /* Activate a secondary processor. */
 int __devinit start_secondary(void *unused)
 {
        unsigned int cpu = smp_processor_id();
+       struct device_node *l2_cache;
+       int i, base;
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
@@ -404,6 +454,33 @@ int __devinit start_secondary(void *unused)
 
        ipi_call_lock();
        cpu_set(cpu, cpu_online_map);
+       /* Update sibling maps */
+       base = cpu_first_thread_in_core(cpu);
+       for (i = 0; i < threads_per_core; i++) {
+               if (cpu_is_offline(base + i))
+                       continue;
+               cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+               cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+
+               /* cpu_core_map should be a superset of
+                * cpu_sibling_map even if we don't have cache
+                * information, so update the former here, too.
+                */
+               cpu_set(cpu, per_cpu(cpu_core_map, base + i));
+               cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+       }
+       l2_cache = cpu_to_l2cache(cpu);
+       for_each_online_cpu(i) {
+               struct device_node *np = cpu_to_l2cache(i);
+               if (!np)
+                       continue;
+               if (np == l2_cache) {
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+               }
+               of_node_put(np);
+       }
+       of_node_put(l2_cache);
        ipi_call_unlock();
 
        local_irq_enable();
@@ -441,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-       if (smp_ops->cpu_disable)
-               return smp_ops->cpu_disable();
+       struct device_node *l2_cache;
+       int cpu = smp_processor_id();
+       int base, i;
+       int err;
 
-       return -ENOSYS;
+       if (!smp_ops->cpu_disable)
+               return -ENOSYS;
+
+       err = smp_ops->cpu_disable();
+       if (err)
+               return err;
+
+       /* Update sibling maps */
+       base = cpu_first_thread_in_core(cpu);
+       for (i = 0; i < threads_per_core; i++) {
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+               cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+               cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
+               cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+       }
+
+       l2_cache = cpu_to_l2cache(cpu);
+       for_each_present_cpu(i) {
+               struct device_node *np = cpu_to_l2cache(i);
+               if (!np)
+                       continue;
+               if (np == l2_cache) {
+                       cpu_clear(cpu, per_cpu(cpu_core_map, i));
+                       cpu_clear(i, per_cpu(cpu_core_map, cpu));
+               }
+               of_node_put(np);
+       }
+       of_node_put(l2_cache);
+
+
+       return 0;
 }
 
 void __cpu_die(unsigned int cpu)