Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[pandora-kernel.git] / arch / x86 / kernel / smpboot.c
index e02653a..9f548cb 100644
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
 
+       /*
+        * Wait until the cpu which brought this one up has marked it
+        * online before enabling interrupts. If we don't, we can end
+        * up waking the softirq thread before this cpu has reached
+        * the active state, which makes the scheduler unhappy and
+        * causes it to schedule the softirq thread on the wrong cpu.
+        * This is only observable with forced threaded interrupts,
+        * but in theory it could also happen without them. It's just
+        * way harder to achieve.
+        */
+       while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+               cpu_relax();
+
        /* enable local interrupts */
        local_irq_enable();
 
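The hunk above is a plain publish/subscribe spin-wait: the secondary CPU polls cpu_active_mask until the cpu that brought it up publishes the bit, and only then enables local interrupts. A minimal, self-contained sketch of the same pattern in userspace C11 follows; cpu_active and secondary() are hypothetical stand-ins rather than kernel APIs, and sched_yield() plays the role of cpu_relax():

        /* Hypothetical userspace illustration of the spin-until-active
         * handshake from start_secondary(); not kernel code. */
        #include <stdatomic.h>
        #include <stdio.h>
        #include <pthread.h>
        #include <sched.h>

        static atomic_bool cpu_active = false;

        /* Stand-in for the secondary cpu: spin until the "boot cpu"
         * marks us active; only then proceed (the kernel would enable
         * local interrupts at this point). */
        static void *secondary(void *unused)
        {
                while (!atomic_load_explicit(&cpu_active, memory_order_acquire))
                        sched_yield();          /* role of cpu_relax() */
                puts("secondary: active, safe to enable interrupts");
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, secondary, NULL);
                /* Stand-in for the boot cpu marking the new cpu active. */
                atomic_store_explicit(&cpu_active, true, memory_order_release);
                pthread_join(&t, NULL);
                return 0;
        }

Compile with -pthread. The acquire/release pair is meant to mirror the ordering the kernel gets from the cpumask operations and barriers on the hotplug path.
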
@@ -1307,7 +1320,7 @@ void play_dead_common(void)
 {
        idle_task_exit();
        reset_lazy_tlbstate();
-       c1e_remove_cpu(raw_smp_processor_id());
+       amd_e400_remove_cpu(raw_smp_processor_id());
 
        mb();
        /* Ack it */
@@ -1332,7 +1345,7 @@ static inline void mwait_play_dead(void)
        void *mwait_ptr;
        struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-       if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))
+       if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
                return;
        if (!this_cpu_has(X86_FEATURE_CLFLSH))
                return;
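
The mwait_play_dead() hunk is an operator-precedence fix, not a rename: ! binds tighter than &&, so the old guard returned early only when MWAIT was absent yet reported usable, and a CPU lacking MWAIT (or one where mwait_usable() is false) fell through into the MWAIT loop. The intent is to bail out unless both conditions hold, i.e. !(a && b), which by De Morgan equals !a || !b. A throwaway truth-table check, where a and b are hypothetical stand-ins for this_cpu_has(X86_FEATURE_MWAIT) and mwait_usable(c):

        /* Illustrative check of the guard fixed above; not kernel code. */
        #include <stdio.h>

        int main(void)
        {
                for (int a = 0; a <= 1; a++)
                        for (int b = 0; b <= 1; b++)
                                printf("a=%d b=%d  buggy(!a && b)=%d  fixed(!(a && b))=%d\n",
                                       a, b, !a && b, !(a && b));
                return 0;
        }

The buggy form is true only for a=0, b=1; the fixed form is true for every combination except a=1, b=1, which is exactly the desired "return unless MWAIT is present and usable" guard.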