Merge branches 'release', 'asus', 'sony-laptop' and 'thinkpad' into release
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3744cf6..dabdbef 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -75,7 +75,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-       return ((unsigned long *)tsk->thread.esp)[3];
+       return ((unsigned long *)tsk->thread.sp)[3];
 }
 
 /*
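
[Annotation: the register-named fields of struct thread_struct were renamed as part of the 32/64-bit x86 unification; a sketch of the members this patch touches, other members omitted:

struct thread_struct {
	unsigned long	sp0;	/* was esp0: kernel stack top loaded into the TSS */
	unsigned long	sp;	/* was esp:  saved kernel stack pointer */
	unsigned long	ip;	/* was eip:  saved instruction pointer */
	/* ... */
};
]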
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(default_idle);
  * to poll the ->work.need_resched flag instead of waiting for the
  * cross-CPU IPI to arrive. Use this option with caution.
  */
-static void poll_idle (void)
+static void poll_idle(void)
 {
        cpu_relax();
 }
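
[Annotation: on x86, cpu_relax() expands to the PAUSE instruction ("rep; nop"), which cuts power and yields pipeline resources to an SMT sibling while busy-waiting; a minimal user-space analogue:

static inline void relax(void)
{
	asm volatile("rep; nop" ::: "memory");	/* PAUSE */
}
]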
@@ -198,6 +198,9 @@ void cpu_idle(void)
                        rmb();
                        idle = pm_idle;
 
+                       if (rcu_pending(cpu))
+                               rcu_check_callbacks(cpu, 0);
+
                        if (!idle)
                                idle = default_idle;
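
[Annotation: the added calls drain pending RCU callbacks before the CPU idles, since a CPU parked in the idle loop could otherwise delay grace periods. Their prototypes in this era are roughly as below; the second argument of rcu_check_callbacks() flags a user-mode interruption, so the idle loop passes 0:

extern int rcu_pending(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
]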
 
@@ -248,7 +251,7 @@ void cpu_idle_wait(void)
                 * because it has nothing to do.
                 * Give all the remaining CPUs a kick.
                 */
-               smp_call_function_mask(map, do_nothing, 0, 0);
+               smp_call_function_mask(map, do_nothing, NULL, 0);
        } while (!cpus_empty(map));
 
        set_cpus_allowed(current, tmp);
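
[Annotation: the 0 -> NULL change is cosmetic but correct: the third parameter is the untyped cookie handed through to func, so a null pointer constant is the idiomatic argument. The signature is roughly:

int smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
			   void *info, int wait);
]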
@@ -282,19 +285,37 @@ static void mwait_idle(void)
        mwait_idle_with_hints(0, 0);
 }
 
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+       if (force_mwait)
+               return 1;
+       /* Any C1 states supported? */
+       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
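
[Annotation: mwait_usable() keys off CPUID leaf 5 (MONITOR/MWAIT), whose EDX enumerates MWAIT sub-C-states in 4-bit fields; bits 7:4 count the C1 sub-states, hence (cpuid_edx(5) >> 4) & 0xf. A hypothetical user-space probe of the same bits:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx))
		return 1;			/* leaf 5 not enumerated */
	printf("C1 MWAIT sub-states: %u\n", (edx >> 4) & 0xf);
	return 0;
}
]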
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-       if (cpu_has(c, X86_FEATURE_MWAIT)) {
-               printk("monitor/mwait feature present.\n");
+       static int selected;
+
+       if (selected)
+               return;
+#ifdef CONFIG_X86_SMP
+       if (pm_idle == poll_idle && smp_num_siblings > 1) {
+               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+                       " performance may degrade.\n");
+       }
+#endif
+       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * Skip if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
                if (!pm_idle) {
-                       printk("using mwait in idle threads.\n");
+                       printk(KERN_INFO "using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
+       selected = 1;
 }
 
 static int __init idle_setup(char *str)
@@ -302,10 +323,6 @@ static int __init idle_setup(char *str)
        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
-#ifdef CONFIG_X86_SMP
-               if (smp_num_siblings > 1)
-                       printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
-#endif
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else
@@ -379,7 +396,7 @@ void __show_registers(struct pt_regs *regs, int all)
 void show_regs(struct pt_regs *regs)
 {
        __show_registers(regs, 1);
-       show_trace(NULL, regs, &regs->sp);
+       show_trace(NULL, regs, &regs->sp, regs->bp);
 }
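
[Annotation: show_trace() now takes the frame pointer explicitly, so the unwinder can start from a known frame instead of guessing one from the stack; its declaration after this series is roughly:

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp);
]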
 
 /*
@@ -488,12 +505,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        childregs->ax = 0;
        childregs->sp = sp;
 
-       p->thread.esp = (unsigned long) childregs;
-       p->thread.esp0 = (unsigned long) (childregs+1);
+       p->thread.sp = (unsigned long) childregs;
+       p->thread.sp0 = (unsigned long) (childregs+1);
 
-       p->thread.eip = (unsigned long) ret_from_fork;
+       p->thread.ip = (unsigned long) ret_from_fork;
 
-       savesegment(gs,p->thread.gs);
+       savesegment(gs, p->thread.gs);
 
        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -571,24 +588,8 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 }
 EXPORT_SYMBOL(dump_thread);
 
-/* 
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-       struct pt_regs ptregs = *task_pt_regs(tsk);
-       ptregs.cs &= 0xffff;
-       ptregs.ds &= 0xffff;
-       ptregs.es &= 0xffff;
-       ptregs.ss &= 0xffff;
-
-       elf_core_copy_regs(regs, &ptregs);
-
-       return 1;
-}
-
 #ifdef CONFIG_SECCOMP
-void hard_disable_TSC(void)
+static void hard_disable_TSC(void)
 {
        write_cr4(read_cr4() | X86_CR4_TSD);
 }
@@ -603,7 +604,7 @@ void disable_TSC(void)
                hard_disable_TSC();
        preempt_enable();
 }
-void hard_enable_TSC(void)
+static void hard_enable_TSC(void)
 {
        write_cr4(read_cr4() & ~X86_CR4_TSD);
 }
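
[Annotation: CR4.TSD (bit 2) makes RDTSC a privileged instruction, so once hard_disable_TSC() runs, a user-mode time-stamp read raises #GP and the task gets SIGSEGV. A hypothetical user-space snippet that would die under the seccomp TSC disable:

#include <stdint.h>
#include <x86intrin.h>

uint64_t read_tsc(void)
{
	return __rdtsc();	/* RDTSC: faults if CR4.TSD is set at CPL 3 */
}
]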
@@ -614,11 +615,21 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
 {
        struct thread_struct *prev, *next;
+       unsigned long debugctl;
 
        prev = &prev_p->thread;
        next = &next_p->thread;
 
-       if (next->debugctlmsr != prev->debugctlmsr)
+       debugctl = prev->debugctlmsr;
+       if (next->ds_area_msr != prev->ds_area_msr) {
+               /* we clear debugctl to make sure DS
+                * is not in use when we change it */
+               debugctl = 0;
+               wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+               wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
+       }
+
+       if (next->debugctlmsr != debugctl)
                wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
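
[Annotation: the ordering is the point: DEBUGCTL is zeroed first so the Debug Store machinery is quiescent while MSR_IA32_DS_AREA is repointed, and the incoming task's debugctl value is restored afterwards. As a reminder (sketch), the two MSR helpers relate as:

/* wrmsrl(msr, val) writes both 32-bit halves at once, i.e. it acts like
 * wrmsr(msr, (u32)val, (u32)(val >> 32)); the DS-area write above therefore
 * stores a 32-bit buffer pointer with the high half zeroed. */
]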
 
        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
@@ -642,6 +653,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 #endif
 
+       if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
+               ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
+
+       if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
+               ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+
+
        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
@@ -699,7 +717,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
-struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
@@ -718,7 +736,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        /*
         * Reload esp0.
         */
-       load_esp0(tss, next);
+       load_sp0(tss, next);
 
        /*
         * Save away %gs. No need to save %fs, as it was saved on the
@@ -851,7 +869,7 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
-       sp = p->thread.esp;
+       sp = p->thread.sp;
        if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes bp last. */
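
[Annotation: the remainder of get_wchan() (unchanged by this patch) chases saved frame pointers upward from thread.sp; the walk looks roughly like this, with stack_page, top_ebp, bp, ip and count being locals of the function:

	bp = *(unsigned long *)sp;		/* switch_to() pushed bp last */
	do {
		if (bp < stack_page || bp > top_ebp + stack_page)
			return 0;
		ip = *(unsigned long *)(bp + 4);	/* return address */
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *)bp;		/* caller's saved bp */
	} while (count++ < 16);
]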