x86/cpu, x86/pti: Do not enable PTI on AMD processors
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index aa003b1..75bdfa7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -84,7 +84,7 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
 
 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too
@@ -140,6 +140,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 static int __init x86_xsave_setup(char *s)
 {
+       if (strlen(s))
+               return 0;
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        return 1;
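
Old-style __setup() options are matched by prefix, so without the strlen()
guard added above, booting with "noxsaveopt" would also fire the "noxsave"
handler, with s pointing at the leftover "opt".  A trimmed sketch of the
dispatch loop, following the 3.2-era obsolete_checksetup() in init/main.c:

    /* sketch: only the path relevant to the strlen() guard is shown */
    p = __setup_start;
    do {
            int n = strlen(p->str);

            if (!strncmp(line, p->str, n) && p->setup_func) {
                    /* the handler sees only the tail after the prefix */
                    if (p->setup_func(line + n))
                            return 1;       /* nonzero consumes the option */
            }
            p++;
    } while (p < __setup_end);
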
@@ -153,6 +155,40 @@ static int __init x86_xsaveopt_setup(char *s)
 }
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
+#ifdef CONFIG_X86_64
+static int __init x86_pcid_setup(char *s)
+{
+       /* require an exact match without trailing characters */
+       if (strlen(s))
+               return 0;
+
+       /* do not emit a message if the feature is not present */
+       if (!boot_cpu_has(X86_FEATURE_PCID))
+               return 1;
+
+       setup_clear_cpu_cap(X86_FEATURE_PCID);
+       pr_info("nopcid: PCID feature disabled\n");
+       return 1;
+}
+__setup("nopcid", x86_pcid_setup);
+#endif
+
+static int __init x86_noinvpcid_setup(char *s)
+{
+       /* noinvpcid doesn't accept parameters */
+       if (s)
+               return -EINVAL;
+
+       /* do not emit a message if the feature is not present */
+       if (!boot_cpu_has(X86_FEATURE_INVPCID))
+               return 0;
+
+       setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+       pr_info("noinvpcid: INVPCID feature disabled\n");
+       return 0;
+}
+early_param("noinvpcid", x86_noinvpcid_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
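
Note the two registration styles above: x86_pcid_setup() is an old-style
__setup() handler (runs late, receives the prefix remainder, returns 1 when
the option is consumed), while x86_noinvpcid_setup() is an early_param()
handler (runs from parse_early_param(), receives the value after '=' or NULL,
and returns 0 on success).  The macros differ only in a flag; from
include/linux/init.h:

    #define __setup(str, fn)        __setup_param(str, fn, fn, 0)
    #define early_param(str, fn)    __setup_param(str, fn, fn, 1)

That difference is why one handler guards with strlen(s) and the other with
if (s).
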
@@ -274,6 +310,44 @@ static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
        }
 }
 
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_PCID)) {
+               /*
+                * Per the SDM, CR4.PCIDE can't be set in 32-bit mode
+                * even if PCID is enumerated, hence the IS_ENABLED() test.
+                */
+               if (IS_ENABLED(CONFIG_X86_64) &&
+                   (cpu_has(c, X86_FEATURE_PGE) || kaiser_enabled)) {
+                       set_in_cr4(X86_CR4_PCIDE);
+                       /*
+                        * INVPCID has two "groups" of types:
+                        * 1/2: Invalidate an individual address
+                        * 3/4: Invalidate all contexts
+                        *
+                        * 1/2 take a PCID, but 3/4 do not.  So, 3/4
+                        * ignore the PCID argument in the descriptor.
+                        * But, we have to be careful not to call 1/2
+                        * with an actual non-zero PCID in them before
+                        * we do the above set_in_cr4().
+                        */
+                       if (cpu_has(c, X86_FEATURE_INVPCID))
+                               set_cpu_cap(c, X86_FEATURE_INVPCID_SINGLE);
+               } else {
+                       /*
+                        * flush_tlb_all(), as currently implemented, won't
+                        * work if PCID is on but PGE is not.  Since that
+                        * combination doesn't exist on real hardware, there's
+                        * no reason to try to fully support it, but it's
+                        * polite to avoid corrupting data if we're on
+                        * an improperly configured VM.
+                        */
+                       clear_cpu_cap(c, X86_FEATURE_PCID);
+               }
+       }
+       kaiser_setup_pcid();
+}
+
 /*
  * Some CPU features depend on higher CPUID levels, which may not always
  * be available due to CPUID level capping or broken virtualization
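
For reference, the INVPCID "types" named in the comment above are issued
through a helper added by the companion tlbflush.h change; a sketch following
the upstream code, where the 16-byte descriptor lives in memory and the type
goes in a register:

    static inline void __invpcid(unsigned long pcid, unsigned long addr,
                                 unsigned long type)
    {
            struct { u64 d[2]; } desc = { { pcid, addr } };

            /*
             * The memory clobber keeps the compiler from reordering later
             * memory accesses ahead of the TLB invalidation.  The hex
             * opcode is invpcid (%rcx), %rax in long mode.
             */
            asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                          : : "m" (desc), "a" (type), "c" (&desc) : "memory");
    }
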
@@ -350,8 +424,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __cpuinitdata;
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __cpuinitdata;
 
 void load_percpu_segment(int cpu)
 {
@@ -558,6 +632,16 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
+static void apply_forced_caps(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
+               c->x86_capability[i] &= ~cpu_caps_cleared[i];
+               c->x86_capability[i] |= cpu_caps_set[i];
+       }
+}
+
 void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
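
apply_forced_caps() deliberately walks NCAPINTS + NBUGINTS words, so forced
bug bits survive reprobing just like forced feature bits;
setup_force_cpu_bug() feeds the same cpu_caps_set[] array.  The cpufeature.h
side looks roughly like:

    #define setup_force_cpu_cap(bit) do {                           \
            set_cpu_cap(&boot_cpu_data, bit);                       \
            set_bit(bit, (unsigned long *)cpu_caps_set);            \
    } while (0)

    #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
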
@@ -676,15 +760,16 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
 
-#ifdef CONFIG_SMP
        c->cpu_index = 0;
-#endif
        filter_cpuid_features(c, false);
 
        setup_smep(c);
 
        if (this_cpu->c_bsp_init)
                this_cpu->c_bsp_init(c);
+
+       if (c->x86_vendor != X86_VENDOR_AMD)
+               setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
 }
 
 void __init early_cpu_init(void)
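
This hunk is the change the subject line refers to: X86_BUG_CPU_INSECURE is
forced for every vendor except AMD, whose processors are not believed to be
vulnerable to Meltdown-style attacks.  The bug bit is consumed by the
kaiser/PTI setup code in the companion patches, not in this file; an
illustrative sketch only:

    /* sketch of the consumer in the kaiser init path (not this file) */
    if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) {
            kaiser_enabled = 0;     /* e.g. AMD: skip page-table isolation */
            return;
    }
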
@@ -764,10 +849,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                c->apicid = c->initial_apicid;
 # endif
 #endif
-
-#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
-#endif
        }
 
        setup_smep(c);
@@ -811,10 +893,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_identify(c);
 
        /* Clear/Set all flags overridden by options, after probe */
-       for (i = 0; i < NCAPINTS; i++) {
-               c->x86_capability[i] &= ~cpu_caps_cleared[i];
-               c->x86_capability[i] |= cpu_caps_set[i];
-       }
+       apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
@@ -836,6 +915,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);
 
+       /* Set up PCID */
+       setup_pcid(c);
+
        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
@@ -867,10 +949,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
         * Clear/Set all flags overridden by options; this must be done
         * before the SMP "all CPUs" capability AND just below.
         */
-       for (i = 0; i < NCAPINTS; i++) {
-               c->x86_capability[i] &= ~cpu_caps_cleared[i];
-               c->x86_capability[i] |= cpu_caps_set[i];
-       }
+       apply_forced_caps(c);
 
        /*
         * On SMP, boot_cpu_data holds the common feature set between
@@ -882,6 +961,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+
+               /* OR, i.e. replicate the bug flags */
+               for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
+                       c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
        }
 
        /* Init Machine Check Exception if available. */
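
Feature bits are ANDed across CPUs just above (keeping only the common
subset), but bug bits go the other way: a bug detected on the boot CPU is
ORed into every secondary, since a bug present anywhere affects the whole
system.  The accessors are thin wrappers over the same x86_capability[]
words, per cpufeature.h:

    #define cpu_has_bug(c, bit)     cpu_has(c, (bit))
    #define set_cpu_bug(c, bit)     set_cpu_cap(c, (bit))
    #define boot_cpu_has_bug(bit)   cpu_has_bug(&boot_cpu_data, (bit))
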
@@ -1015,7 +1098,7 @@ static __init int setup_disablecpuid(char *arg)
 {
        int bit;
 
-       if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+       if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
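
The added "bit >= 0" test matters because get_option() happily parses
negative numbers, and setup_clear_cpu_cap() writes straight into fixed-size
bitmaps; roughly, from cpufeature.h:

    #define setup_clear_cpu_cap(bit) do {                           \
            clear_cpu_cap(&boot_cpu_data, bit);                     \
            set_bit(bit, (unsigned long *)cpu_caps_cleared);        \
    } while (0)

A negative bit would make set_bit() scribble before cpu_caps_cleared[].
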
@@ -1058,7 +1141,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
 };
 
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 
 /* May not be marked __init: used by software suspend */
@@ -1158,6 +1241,15 @@ void __cpuinit cpu_init(void)
        int cpu;
        int i;
 
+       if (!kaiser_enabled) {
+               /*
+                * secondary_startup_64() deferred setting PGE in cr4:
+                * init_memory_mapping() sets it on the boot cpu,
+                * but it needs to be set on each secondary cpu.
+                */
+               set_in_cr4(X86_CR4_PGE);
+       }
+
        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
        oist = &per_cpu(orig_ist, cpu);
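
With kaiser enabled, CR4.PGE is left clear so global pages cannot keep kernel
translations alive across the user/kernel page-table switch; otherwise each
secondary CPU sets it here.  In this kernel generation set_in_cr4() also
records the bit in mmu_cr4_features so later bring-up and resume paths
restore it; roughly, from asm/processor.h:

    static inline void set_in_cr4(unsigned long mask)
    {
            unsigned long cr4;

            mmu_cr4_features |= mask;
            cr4 = read_cr4();
            cr4 |= mask;
            write_cr4(cr4);
    }
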
@@ -1228,7 +1320,7 @@ void __cpuinit cpu_init(void)
        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
-       load_LDT(&init_mm.context);
+       load_mm_ldt(&init_mm);
 
        clear_all_debug_regs();
        dbg_restore_debug_regs();
@@ -1276,7 +1368,7 @@ void __cpuinit cpu_init(void)
        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
-       load_LDT(&init_mm.context);
+       load_mm_ldt(&init_mm);
 
        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
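
The load_LDT() -> load_mm_ldt() switch comes from the prerequisite LDT
rework, which wraps the LDT in a struct that is swapped atomically and freed
only after every CPU has stopped using it.  Its shape, roughly, from the
backport of that rework:

    static inline void load_mm_ldt(struct mm_struct *mm)
    {
            /* pairs with the release store when a new LDT is installed */
            struct ldt_struct *ldt = ACCESS_ONCE(mm->context.ldt);

            if (unlikely(ldt))
                    set_ldt(ldt->entries, ldt->size);
            else
                    clear_LDT();
    }
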