x86/smp: Don't ever patch back to UP if we unplug cpus
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11..a5591d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -265,6 +265,13 @@ notrace static void __cpuinit start_secondary(void *unused)
         */
        check_tsc_sync_target();
 
+       /*
+        * Enable the espfix hack for this CPU
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       init_espfix_ap();
+#endif
+
        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines number of
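
A note on the hunk above: init_espfix_ap() installs this CPU's slice of the espfix fixup stack, which stops IRET from leaking high kernel-stack address bits when returning to a 16-bit user stack segment. The #ifdef at the call site keeps the hunk minimal; an equivalent idiom, sketched below purely for illustration (this stub is not the actual header), moves the guard into a header so callers stay clean:

    /* illustrative header-level stub, e.g. in <asm/espfix.h> */
    #ifdef CONFIG_X86_ESPFIX64
    extern void init_espfix_ap(void);
    #else
    static inline void init_espfix_ap(void) { }    /* no-op without espfix64 */
    #endif
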
@@ -285,6 +292,19 @@ notrace static void __cpuinit start_secondary(void *unused)
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
 
+       /*
+        * Wait until the cpu which brought this one up marked it
+        * online before enabling interrupts. If we don't do that then
+        * we can end up waking up the softirq thread before this cpu
+        * reached the active state, which makes the scheduler unhappy
+        * and schedule the softirq thread on the wrong cpu. This is
+        * only observable with forced threaded interrupts, but in
+        * theory it could also happen w/o them. It's just way harder
+        * to achieve.
+        */
+       while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+               cpu_relax();
+
        /* enable local interrupts */
        local_irq_enable();
 
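
The spin loop closes a startup race: cpu_state is already CPU_ONLINE here, but the scheduler must not use this CPU until cpu_active_mask says so, and taking an interrupt earlier can wake ksoftirqd before the CPU is a valid scheduling target. A simplified sketch of both sides of the handshake (set_cpu_active() is the real setter; the surrounding flow is abridged, and the active bit is set by the generic hotplug code on the CPU that initiated the bring-up):

    /* initiating CPU, once the secondary is known to be up: */
    set_cpu_active(cpu, true);      /* scheduler may now place tasks here */

    /* secondary CPU (the hunk above): hold off interrupts until
     * that store is visible, then open the floodgates: */
    while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
            cpu_relax();
    local_irq_enable();
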
@@ -425,7 +445,7 @@ static void impress_friends(void)
 void __inquire_remote_apic(int apicid)
 {
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-       char *names[] = { "ID", "VERSION", "SPIV" };
+       const char * const names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;
 
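
The added qualifiers make both levels of the table read-only, so the compiler now rejects stores that the old declaration silently allowed. A standalone illustration:

    const char * const names[] = { "ID", "VERSION", "SPIV" };

    names[0][0] = 'X';      /* rejected: the pointed-to chars are const */
    names[0] = "OTHER";     /* rejected: the pointer slots are const too */

With the old "char *names[]" both assignments compile, and the first one writes into a string literal: undefined behaviour, and an instant fault wherever literals are mapped read-only.
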
@@ -669,7 +689,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
-       alternatives_smp_switch(1);
+       /* Just in case we booted with a single CPU. */
+       alternatives_enable_smp();
 
        c_idle.idle = get_idle_for_cpu(cpu);
 
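
alternatives_smp_switch(1) was a toggle; alternatives_enable_smp() is deliberately one-way. A kernel booting on one CPU patches its LOCK prefixes to a harmless single-byte prefix, the first secondary bring-up patches them back in, and after this commit nothing ever undoes that again. The sites involved are ordinary locked instructions whose prefix addresses the LOCK_PREFIX macro records in the .smp_locks section, for example:

    /* arch/x86/include/asm/atomic.h, abridged */
    static inline void atomic_inc(atomic_t *v)
    {
            asm volatile(LOCK_PREFIX "incl %0"
                         : "+m" (v->counter));
    }
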
@@ -1089,20 +1110,6 @@ out:
        preempt_enable();
 }
 
-void arch_disable_nonboot_cpus_begin(void)
-{
-       /*
-        * Avoid the smp alternatives switch during the disable_nonboot_cpus().
-        * In the suspend path, we will be back in the SMP mode shortly anyways.
-        */
-       skip_smp_alternatives = true;
-}
-
-void arch_disable_nonboot_cpus_end(void)
-{
-       skip_smp_alternatives = false;
-}
-
 void arch_enable_nonboot_cpus_begin(void)
 {
        set_mtrr_aps_delayed_init();
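
The two deleted functions were arch overrides of empty __weak defaults, and existed only so disable_nonboot_cpus() would not bounce the alternatives to UP and back across suspend. With the UP switch gone there is nothing left to skip, and the generic no-op pattern they overrode (sketched below) is all that is needed:

    /* generic fallbacks, as defined with __weak linkage */
    void __weak arch_disable_nonboot_cpus_begin(void) { }
    void __weak arch_disable_nonboot_cpus_end(void) { }
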
@@ -1232,6 +1239,9 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
+               cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
+       cpumask_clear(cpu_llc_shared_mask(cpu));
        cpumask_clear(cpu_sibling_mask(cpu));
        cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
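
The teardown now mirrors set_cpu_sibling_map(), which links cpu_llc_shared_mask in both directions when two CPUs report the same last-level-cache ID; without the new loop a dead CPU lingered in its former siblings' masks and tainted the scheduler domains rebuilt after the unplug. The setup side, abridged:

    /* set_cpu_sibling_map(), abridged */
    if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
        per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
            cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
            cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
    }
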
@@ -1265,6 +1275,7 @@ void cpu_disable_common(void)
 int native_cpu_disable(void)
 {
        int cpu = smp_processor_id();
+       int ret;
 
        /*
         * Perhaps use cpufreq to drop frequency, but that could go
@@ -1277,6 +1288,10 @@ int native_cpu_disable(void)
        if (cpu == 0)
                return -EBUSY;
 
+       ret = check_irq_vectors_for_cpu_disable();
+       if (ret)
+               return ret;
+
        clear_local_APIC();
 
        cpu_disable_common();
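
check_irq_vectors_for_cpu_disable() vetoes the offline up front when the interrupts currently routed to this CPU could not all be re-targeted at the surviving CPUs, rather than failing halfway through IRQ migration. The shape of the check, heavily condensed (both helpers are made-up names standing in for the real function's counting loops):

    int check_irq_vectors_for_cpu_disable(void)     /* condensed sketch */
    {
            unsigned int moving = vectors_targeting_this_cpu();     /* hypothetical */
            unsigned int room = free_vectors_on_online_cpus();      /* hypothetical */

            return moving > room ? -ENOSPC : 0;
    }
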
@@ -1293,9 +1308,6 @@ void native_cpu_die(unsigned int cpu)
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);
-
-                       if (1 == num_online_cpus())
-                               alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
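
This removal is the commit's headline: once alternatives_enable_smp() has run, the text is never patched back to UP, so the last-online-CPU special case in the death path goes away. SMP-patched code is still correct on a single CPU (the LOCK prefixes are merely redundant there), a far better trade than rewriting live kernel text on every hot-unplug cycle.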