x86: unify power/cpu_(32|64) regarding restoring processor state
[pandora-kernel.git] / arch / x86 / power / cpu_64.c
index 11ea7d0..5c8bdc0 100644
@@ -28,8 +28,6 @@ unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 #else
 /* CONFIG_X86_64 */
-static void fix_processor_context(void);
-
 struct saved_context saved_context;
 #endif
 
@@ -120,11 +118,57 @@ EXPORT_SYMBOL(save_processor_state);
 static void do_fpu_end(void)
 {
        /*
-        * Restore FPU regs if necessary
+        * Restore FPU regs if necessary.
         */
        kernel_fpu_end();
 }
 
+static void fix_processor_context(void)
+{
+       int cpu = smp_processor_id();
+       struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+       set_tss_desc(cpu, t);   /*
+                                * This just modifies memory; should not be
+                                * necessary. But... This is necessary, because
+                                * 386 hardware has the concept of a busy TSS
+                                * or some similar stupidity.
+                                */
+
+#ifdef CONFIG_X86_64
+       get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; /* 9 = available TSS: clear the busy bit */
+
+       syscall_init();                         /* This sets MSR_*STAR and related */
+#endif
+       load_TR_desc();                         /* This does ltr */
+       load_LDT(&current->active_mm->context); /* This does lldt */
+
+       /*
+        * Now maybe reload the debug registers
+        */
+       if (current->thread.debugreg7) {
+#ifdef CONFIG_X86_32
+               set_debugreg(current->thread.debugreg0, 0);
+               set_debugreg(current->thread.debugreg1, 1);
+               set_debugreg(current->thread.debugreg2, 2);
+               set_debugreg(current->thread.debugreg3, 3);
+               /* no 4 and 5 */
+               set_debugreg(current->thread.debugreg6, 6);
+               set_debugreg(current->thread.debugreg7, 7);
+#else
+               /* CONFIG_X86_64 */
+               loaddebug(&current->thread, 0);
+               loaddebug(&current->thread, 1);
+               loaddebug(&current->thread, 2);
+               loaddebug(&current->thread, 3);
+               /* no 4 and 5 */
+               loaddebug(&current->thread, 6);
+               loaddebug(&current->thread, 7);
+#endif
+       }
+
+}
+
 /**
  *     __restore_processor_state - restore the contents of CPU registers saved
  *             by __save_processor_state()
@@ -135,9 +179,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
        /*
         * control registers
         */
+       /* cr4 was introduced in the Pentium CPU */
+#ifdef CONFIG_X86_32
+       if (ctxt->cr4)
+               write_cr4(ctxt->cr4);
+#else
+/* CONFIG_X86_64 */
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
        write_cr4(ctxt->cr4);
+#endif
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);
@@ -146,13 +197,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
         * now restore the descriptor tables to their proper values
  * ltr is done in fix_processor_context().
         */
+#ifdef CONFIG_X86_32
+       load_gdt(&ctxt->gdt);
+       load_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
        load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-
+#endif
 
        /*
         * segment registers
         */
+#ifdef CONFIG_X86_32
+       loadsegment(es, ctxt->es);
+       loadsegment(fs, ctxt->fs);
+       loadsegment(gs, ctxt->gs);
+       loadsegment(ss, ctxt->ss);
+
+       /*
+        * sysenter MSRs
+        */
+       if (boot_cpu_has(X86_FEATURE_SEP))
+               enable_sep_cpu();
+#else
+/* CONFIG_X86_64 */
        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
@@ -162,6 +231,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+#endif
 
        /*
         * restore XCR0 for xsave capable CPUs.
@@ -173,41 +243,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
 
        do_fpu_end();
        mtrr_ap_init();
+
+#ifdef CONFIG_X86_32
+       mcheck_init(&boot_cpu_data);
+#endif
 }
 
+/* Needed by apm.c */
 void restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
 }
-
-static void fix_processor_context(void)
-{
-       int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(init_tss, cpu);
-
-       /*
-        * This just modifies memory; should not be necessary. But... This
-        * is necessary, because 386 hardware has concept of busy TSS or some
-        * similar stupidity.
-        */
-       set_tss_desc(cpu, t);
-
-       get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
-
-       syscall_init();                         /* This sets MSR_*STAR and related */
-       load_TR_desc();                         /* This does ltr */
-       load_LDT(&current->active_mm->context); /* This does lldt */
-
-       /*
-        * Now maybe reload the debug registers
-        */
-       if (current->thread.debugreg7){
-                loaddebug(&current->thread, 0);
-                loaddebug(&current->thread, 1);
-                loaddebug(&current->thread, 2);
-                loaddebug(&current->thread, 3);
-                /* no 4 and 5 */
-                loaddebug(&current->thread, 6);
-                loaddebug(&current->thread, 7);
-       }
-}
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(restore_processor_state);
+#endif
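
For reference, a minimal sketch (not part of the patch) of how the save/restore pair exported here is typically used by a caller such as apm.c or the hibernation core: save_processor_state() runs just before the machine is put to sleep, and restore_processor_state() immediately after wakeup, so that __restore_processor_state() can reload the control registers, descriptor tables, segments and MSRs captured earlier. platform_enter_sleep() below is a hypothetical placeholder for the real firmware/BIOS sleep call.

	/* Illustrative only; platform_enter_sleep() is a made-up stand-in. */
	static int example_suspend(void)
	{
		int error;

		save_processor_state();			/* snapshot CPU state into saved_context */
		error = platform_enter_sleep();		/* assumed low-level sleep entry point */
		restore_processor_state();		/* runs __restore_processor_state(&saved_context) */

		return error;
	}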