Merge branch 'linus' into tracing/mmiotrace-mergefixups
[pandora-kernel.git] arch/x86/kernel/process_64.c
index 891af1a..ea090e6 100644
@@ -106,26 +106,13 @@ void default_idle(void)
         * test NEED_RESCHED:
         */
        smp_mb();
-       local_irq_disable();
-       if (!need_resched()) {
+       if (!need_resched())
                safe_halt();    /* enables interrupts racelessly */
-               local_irq_disable();
-       }
-       local_irq_enable();
+       else
+               local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
 }
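
The hunk above drops default_idle()'s own local_irq_disable()/local_irq_enable() dance: cpu_idle() (second hunk below) already calls the idle handler with interrupts disabled, and every handler is expected to return with them enabled. safe_halt() is "sti; hlt", so re-enabling interrupts and halting happen in one raceless step, and the new else branch re-enables them explicitly when there is already work pending. A minimal sketch of the resulting function (the TS_POLLING clear and the full comment sit above the hunk and are reconstructed here from the era's source, so treat them as an assumption):

	void default_idle(void)
	{
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (!need_resched())
			safe_halt();		/* sti; hlt - enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	}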
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-       local_irq_enable();
-       cpu_relax();
-}
-
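
The comment being removed above is really about the TS_POLLING contract: while the idle task advertises that it is polling need_resched, a remote CPU that wants to preempt it only has to set the flag and can skip the reschedule IPI, because the polling loop will notice the flag by itself. A simplified sketch of the scheduler side of that bargain (modelled on resched_task() in kernel/sched.c of this era; an illustration, not the exact code):

	static void resched_task(struct task_struct *p)
	{
		int cpu;

		set_tsk_need_resched(p);

		cpu = task_cpu(p);
		if (cpu == smp_processor_id())
			return;

		/* NEED_RESCHED must be visible before we test polling */
		smp_mb();
		if (!tsk_is_polling(p))
			smp_send_reschedule(cpu);	/* IPI only for non-polling idlers */
	}

poll_idle() itself does not vanish from the kernel here; like the other idle helpers removed further down, the upstream side of this merge consolidates it into the shared arch/x86/kernel/process.c.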
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -178,7 +165,10 @@ void cpu_idle(void)
                         */
                        local_irq_disable();
                        enter_idle();
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
                        idle();
+                       start_critical_timings();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
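
idle() sleeps with interrupts nominally off (they come back on only inside safe_halt()/mwait), so without these two calls the irqsoff latency tracer would report every idle period as one enormous interrupts-off critical section. stop_critical_timings()/start_critical_timings() bracket the region so the tracer ignores the intentional wait. The same bracketing works for any code that deliberately waits with interrupts disabled; a hedged sketch (wait_for_work() is a placeholder, not a real kernel function):

	local_irq_disable();
	stop_critical_timings();	/* intentional wait - don't report it as latency */
	wait_for_work();		/* placeholder for hlt / mwait / poll */
	start_critical_timings();
	local_irq_enable();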
@@ -192,110 +182,6 @@ void cpu_idle(void)
        }
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-       smp_mb();
-       /* kick all the CPUs so that they exit out of pm_idle */
-       smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
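
The comment spells out the contract: store the new handler first, then call cpu_idle_wait(), which uses a dummy cross-call to kick every CPU out of whatever pm_idle it was executing, so the old handler is guaranteed to be unused once the call returns. A hedged usage sketch (my_new_idle is a placeholder; this is roughly how the ACPI processor-idle code switched handlers at the time):

	/* 1. install the replacement before waking anyone */
	pm_idle = my_new_idle;

	/* 2. make sure no CPU is still inside the old handler */
	cpu_idle_wait();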
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __mwait(ax, cx);
-       }
-}
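
The re-check of need_resched() after __monitor() is what keeps this race-free: MONITOR arms the hardware watch on the flags word first, so a wakeup that sets TIF_NEED_RESCHED between the two tests either makes the second test fail or terminates the MWAIT. The ax argument carries the MWAIT hint (target C-state in bits 7:4, sub-state in bits 3:0) and cx the MWAIT extensions; a hedged example call (values are illustrative only):

	/* hypothetical: request C1 (hint 0x00); ECX bit 0 asks that even masked
	 * interrupts break the wait, so the CPU can always be woken */
	mwait_idle_with_hints(0x00, 0x01);

The no-hint variant below, mwait_idle(), uses __sti_mwait(0, 0), the MWAIT counterpart of safe_halt(): interrupts are re-enabled in the same breath as entering the wait, so a wakeup cannot slip through between the two steps.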
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __sti_mwait(0, 0);
-               else
-                       local_irq_enable();
-       } else {
-               local_irq_enable();
-       }
-}
-
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-       if (force_mwait)
-               return 1;
-       /* Any C1 states supported? */
-       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
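
CPUID leaf 5 describes MONITOR/MWAIT; EDX reports, in 4-bit fields, how many MWAIT sub-states each C-state supports (bits 3:0 for C0, bits 7:4 for C1, and so on), and the cpuid_level >= 5 test merely guards that the leaf exists at all. A worked example with a made-up register value:

	unsigned int edx = 0x00000220;		/* hypothetical CPUID.05H:EDX */
	unsigned int c1 = (edx >> 4) & 0xf;	/* bits 7:4 -> 2 MWAIT C1 sub-states */
	/* c1 > 0, so mwait_usable() would return 1 on such a CPU */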
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-       static int selected;
-
-       if (selected)
-               return;
-#ifdef CONFIG_X86_SMP
-       if (pm_idle == poll_idle && smp_num_siblings > 1) {
-               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-                       " performance may degrade.\n");
-       }
-#endif
-       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-               /*
-                * Skip, if setup has overridden idle.
-                * One CPU supports mwait => All CPUs support mwait
-                */
-               if (!pm_idle) {
-                       printk(KERN_INFO "using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-       }
-       selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-       if (!strcmp(str, "poll")) {
-               printk("using polling idle threads.\n");
-               pm_idle = poll_idle;
-       } else if (!strcmp(str, "mwait"))
-               force_mwait = 1;
-       else
-               return -1;
-
-       boot_option_idle_override = 1;
-       return 0;
-}
-early_param("idle", idle_setup);
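
idle_setup() wires up the "idle=" early boot parameter: "idle=poll" forces the busy-polling loop, "idle=mwait" forces MWAIT even where mwait_usable() would decline, and either choice sets boot_option_idle_override so later setup code does not second-guess it. In practice it is exercised simply by appending the option to the kernel command line at boot, e.g.:

	idle=poll		(or: idle=mwait)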
-
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs * regs)
 {
@@ -411,6 +297,7 @@ void flush_thread(void)
        /*
         * Forget coprocessor state..
         */
+       tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
 }
@@ -562,7 +449,7 @@ static void hard_enable_TSC(void)
        write_cr4(read_cr4() & ~X86_CR4_TSD);
 }
 
-void enable_TSC(void)
+static void enable_TSC(void)
 {
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
@@ -775,8 +662,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
+        *
+        * tsk_used_math() checks prevent calling math_state_restore(),
+        * which can sleep in the case of !tsk_used_math()
         */
-       if (next_p->fpu_counter>5)
+       if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
                math_state_restore();
        return prev_p;
 }
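
The new tsk_used_math() guard matters because math_state_restore() on a task that has never touched the FPU falls through to init_fpu(), which allocates the FPU state area and may sleep, and sleeping is not allowed in the context-switch path. The counter feeding the "> 5" heuristic is maintained by that same trap path: each math_state_restore() bumps fpu_counter, and flush_thread(), in the earlier hunk, zeroes it on exec. A heavily simplified sketch of the trap side, not the exact kernel code:

	/* device-not-available (#NM) trap handler, heavily simplified */
	void math_state_restore(void)
	{
		struct task_struct *me = current;

		if (!used_math())
			init_fpu(me);		/* first FPU use: allocate state, may sleep */

		/* ... restore the saved FPU registers for 'me' ... */

		me->fpu_counter++;		/* feeds the "> 5 timeslices" check above */
	}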