Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3e94a8c..4a442c3 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -28,7 +28,7 @@
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/cpudata.h>
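For context: in this era of the tree, <linux/atomic.h> is a thin wrapper that includes the architecture's <asm/atomic.h> and layers the generic helpers on top, roughly as in the following simplified sketch (not the verbatim header):

	#include <asm/atomic.h>			/* arch-provided atomic_t ops */

	#ifndef atomic_inc_not_zero		/* generic fallbacks layered on top */
	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
	#endif

so C files are expected to include the linux/ variant rather than <asm/atomic.h> directly.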
@@ -121,11 +121,11 @@ void __cpuinit smp_callin(void)
        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);
 
-       while (!cpu_isset(cpuid, smp_commenced_mask))
+       while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                rmb();
 
        ipi_call_lock_irq();
-       cpu_set(cpuid, cpu_online_map);
+       set_cpu_online(cpuid, true);
        ipi_call_unlock_irq();
 
        /* idle thread is expected to have preempt disabled */
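The same conversion pattern repeats throughout the patch: operators that took a cpumask_t directly move to the struct-cpumask-pointer API, and direct writes to cpu_online_map / cpu_present_map go through the set_cpu_online() / set_cpu_present() accessors instead. As an illustrative summary of the mappings used below (a reference aid, not part of the patch itself):

	cpu_set(cpu, mask)              ->  cpumask_set_cpu(cpu, &mask)
	cpu_clear(cpu, mask)            ->  cpumask_clear_cpu(cpu, &mask)
	cpu_isset(cpu, mask)            ->  cpumask_test_cpu(cpu, &mask)
	cpus_clear(mask)                ->  cpumask_clear(&mask)
	for_each_cpu_mask(i, mask)      ->  for_each_cpu(i, &mask)
	&cpumask_of_cpu(cpu)            ->  cpumask_of(cpu)
	&cpu_online_map                 ->  cpu_online_mask
	cpu_set(cpu, cpu_online_map)    ->  set_cpu_online(cpu, true)
	cpu_clear(cpu, cpu_present_map) ->  set_cpu_present(cpu, false)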
@@ -785,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
 /* Send cross call to all processors mentioned in MASK_P
  * except self.  Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
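A typical masked caller elsewhere in this file hands in the per-mm mask, along the lines of the following (illustrative call only; the TLB-flush hunks are not part of this excerpt):

	smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, mm_cpumask(mm));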
@@ -797,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 /* Send cross call to all processors except self. */
 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 {
-       smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+       smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -805,7 +805,7 @@ extern unsigned long xcall_sync_tick;
 static void smp_start_sync_tick_client(int cpu)
 {
        xcall_deliver((u64) &xcall_sync_tick, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 extern unsigned long xcall_call_function;
@@ -820,7 +820,7 @@ extern unsigned long xcall_call_function_single;
 void arch_send_call_function_single_ipi(int cpu)
 {
        xcall_deliver((u64) &xcall_call_function_single, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                }
                if (data0) {
                        xcall_deliver(data0, __pa(pg_addr),
-                                     (u64) pg_addr, &cpumask_of_cpu(cpu));
+                                     (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
                        atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        }
        if (data0) {
                xcall_deliver(data0, __pa(pg_addr),
-                             (u64) pg_addr, &cpu_online_map);
+                             (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -1197,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
        for_each_present_cpu(i) {
                unsigned int j;
 
-               cpus_clear(cpu_core_map[i]);
+               cpumask_clear(&cpu_core_map[i]);
                if (cpu_data(i).core_id == 0) {
-                       cpu_set(i, cpu_core_map[i]);
+                       cpumask_set_cpu(i, &cpu_core_map[i]);
                        continue;
                }
 
                for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
-                               cpu_set(j, cpu_core_map[i]);
+                               cpumask_set_cpu(j, &cpu_core_map[i]);
                }
        }
 
        for_each_present_cpu(i) {
                unsigned int j;
 
-               cpus_clear(per_cpu(cpu_sibling_map, i));
+               cpumask_clear(&per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
-                       cpu_set(i, per_cpu(cpu_sibling_map, i));
+                       cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
                        continue;
                }
 
                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
-                               cpu_set(j, per_cpu(cpu_sibling_map, i));
+                               cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
                }
        }
 }
@@ -1232,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
        int ret = smp_boot_one_cpu(cpu);
 
        if (!ret) {
-               cpu_set(cpu, smp_commenced_mask);
-               while (!cpu_isset(cpu, cpu_online_map))
+               cpumask_set_cpu(cpu, &smp_commenced_mask);
+               while (!cpu_online(cpu))
                        mb();
-               if (!cpu_isset(cpu, cpu_online_map)) {
+               if (!cpu_online(cpu)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
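Taken together with the smp_callin() hunk above, the CPU bring-up handshake after this conversion reads roughly as follows (a condensed sketch of the two sides, not literal source):

	/* boot cpu, __cpu_up() */
	cpumask_set_cpu(cpu, &smp_commenced_mask);	/* release the new cpu */
	while (!cpu_online(cpu))
		mb();					/* wait for it to mark itself online */

	/* new cpu, smp_callin() */
	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();
	set_cpu_online(cpuid, true);			/* lets __cpu_up() proceed */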
@@ -1269,7 +1269,7 @@ void cpu_play_dead(void)
                                tb->nonresum_mondo_pa, 0);
        }
 
-       cpu_clear(cpu, smp_commenced_mask);
+       cpumask_clear_cpu(cpu, &smp_commenced_mask);
        membar_safe("#Sync");
 
        local_irq_disable();
@@ -1290,13 +1290,13 @@ int __cpu_disable(void)
        cpuinfo_sparc *c;
        int i;
 
-       for_each_cpu_mask(i, cpu_core_map[cpu])
-               cpu_clear(cpu, cpu_core_map[i]);
-       cpus_clear(cpu_core_map[cpu]);
+       for_each_cpu(i, &cpu_core_map[cpu])
+               cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+       cpumask_clear(&cpu_core_map[cpu]);
 
-       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
 
        c = &cpu_data(cpu);
 
@@ -1313,7 +1313,7 @@ int __cpu_disable(void)
        local_irq_disable();
 
        ipi_call_lock();
-       cpu_clear(cpu, cpu_online_map);
+       set_cpu_online(cpu, false);
        ipi_call_unlock();
 
        cpu_map_rebuild();
@@ -1327,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
 
        for (i = 0; i < 100; i++) {
                smp_rmb();
-               if (!cpu_isset(cpu, smp_commenced_mask))
+               if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
                        break;
                msleep(100);
        }
-       if (cpu_isset(cpu, smp_commenced_mask)) {
+       if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
                printk(KERN_ERR "CPU %u didn't die...\n", cpu);
        } else {
 #if defined(CONFIG_SUN_LDOMS)
@@ -1341,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
                do {
                        hv_err = sun4v_cpu_stop(cpu);
                        if (hv_err == HV_EOK) {
-                               cpu_clear(cpu, cpu_present_map);
+                               set_cpu_present(cpu, false);
                                break;
                        }
                } while (--limit > 0);
@@ -1362,12 +1362,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
        xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       scheduler_ipi();
 }
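The added scheduler_ipi() call is the architecture side of the scheduler's IPI rework: the handler behind smp_send_reschedule() is expected to call back into the scheduler so that queued work (for example remote wakeups) can be processed from the reschedule interrupt itself rather than only via the interrupt return path.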
 
 /* This is a nop because we capture all other cpus