pandora-kernel.git: arch/x86/kernel/cpu/bugs.c (at commit 320996cc9053e32547c9426629f121e965c0a5d9)
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);

#ifdef CONFIG_X86_32

static int __init no_halt(char *s)
{
        WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
        boot_cpu_data.hlt_works_ok = 0;
        return 1;
}

__setup("no-hlt", no_halt);

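/*
 * "no387" forces the kernel to ignore the hardware FPU: setting
 * EM/TS/MP in CR0 makes floating point instructions trap, so the
 * math emulator (if configured) handles them instead.
 */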
static int __init no_387(char *s)
{
        boot_cpu_data.hard_math = 0;
        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
        return 1;
}

__setup("no387", no_387);

static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;

/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
static void __init check_fpu(void)
{
        s32 fdiv_bug;

        if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
                printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
                printk(KERN_EMERG "Giving up.\n");
                for (;;) ;
#endif
                return;
        }

        kernel_fpu_begin();

        /*
         * trap_init() enabled FXSR and company _before_ testing for FP
         * problems here.
         *
         * Test for the divl bug..
         */
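        /*
         * The sequence below computes (x/y)*y - x and rounds the
         * result to an integer.  On a correct FPU the difference
         * rounds to 0; on a Pentium with the FDIV flaw the quotient
         * 4195835/3145727 comes out wrong, so the stored value is
         * nonzero and fdiv_bug gets set.
         */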
        __asm__("fninit\n\t"
                "fldl %1\n\t"
                "fdivl %2\n\t"
                "fmull %2\n\t"
                "fldl %1\n\t"
                "fsubp %%st,%%st(1)\n\t"
                "fistpl %0\n\t"
                "fwait\n\t"
                "fninit"
                : "=m" (*&fdiv_bug)
                : "m" (*&x), "m" (*&y));

        kernel_fpu_end();

        boot_cpu_data.fdiv_bug = fdiv_bug;
        if (boot_cpu_data.fdiv_bug)
                printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
}

static void __init check_hlt(void)
{
        if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
                return;

        printk(KERN_INFO "Checking 'hlt' instruction... ");
        if (!boot_cpu_data.hlt_works_ok) {
                printk(KERN_CONT "disabled\n");
                return;
        }
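        /*
         * Actually execute hlt a few times; on the early chips where
         * hlt is broken the machine hangs right here, which is the
         * whole point of the check.
         */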
        halt();
        halt();
        halt();
        halt();
        printk(KERN_CONT "OK.\n");
}

/*
 *      Most 386 processors have a bug where a POPAD can lock the
 *      machine even from user space.
 */

static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
        int res, inp = (int) &res;

        printk(KERN_INFO "Checking for popad bug... ");
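        /*
         * The buggy 386s trash EAX when POPAD is immediately followed
         * by an instruction using complex (base+index) addressing.
         * Load a magic value into EAX, round-trip the registers with
         * pusha/popa, touch memory through (%edx,%edi), and then check
         * that the magic value survived.
         */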
        __asm__ __volatile__(
          "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
          : "=&a" (res)
          : "d" (inp)
          : "ecx", "edi");
        /*
         * If this fails, it means that any user program may lock the
         * CPU hard. Too bad.
         */
        if (res != 12345678)
                printk(KERN_CONT "Buggy.\n");
        else
                printk(KERN_CONT "OK.\n");
#endif
}
/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on an i386, we need to be compiled for i386
 *   (due to the lack of "invlpg" and working WP on an i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for an i486.
 */

static void __init check_config(void)
{
/*
 * We'd better not be an i386 if we're configured to use some
 * i486+ only features! (WP works in supervisor mode and the
 * new "invlpg" and "bswap" instructions)
 */
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
        defined(CONFIG_X86_BSWAP)
        if (boot_cpu_data.x86 == 3)
                panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
}

#endif /* CONFIG_X86_32 */

void __init check_bugs(void)
{
#ifdef CONFIG_X86_32
        /*
         * Regardless of whether PCID is enumerated, the SDM says
         * that it can't be enabled in 32-bit mode.
         */
        setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
        check_config();
        check_fpu();
        check_hlt();
        check_popad();
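        /*
         * Encode the CPU family into the utsname machine string:
         * "i386" becomes i386..i686 according to boot_cpu_data.x86,
         * capped at 6.
         */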
        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages can cause slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
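/*
 * Accepted values, parsed by spectre_v2_parse_cmdline() below:
 * spectre_v2={on|off|auto|retpoline|retpoline,generic|retpoline,amd};
 * "nospectre_v2" is equivalent to spectre_v2=off.
 */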
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
static bool spectre_v2_bad_module;

#ifdef RETPOLINE
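/*
 * Loading a module that was not built with retpolines re-opens the
 * indirect-branch attack surface, so warn and remember it for the
 * sysfs report below.
 */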
bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

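/*
 * True when the kernel itself was built by a retpoline-aware compiler
 * (the build defines RETPOLINE); otherwise only the minimal ASM
 * retpoline thunks are available.
 */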
static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

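/*
 * Compare a length-delimited command line argument against a
 * NUL-terminated option string.
 */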
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                  sizeof(arg));
        if (ret > 0) {
                if (match_option(arg, ret, "off")) {
                        goto disable;
                } else if (match_option(arg, ret, "on")) {
                        spec2_print_if_secure("force enabled on command line.");
                        return SPECTRE_V2_CMD_FORCE;
                } else if (match_option(arg, ret, "retpoline")) {
                        spec2_print_if_insecure("retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE;
                } else if (match_option(arg, ret, "retpoline,amd")) {
                        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                                return SPECTRE_V2_CMD_AUTO;
                        }
                        spec2_print_if_insecure("AMD retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_AMD;
                } else if (match_option(arg, ret, "retpoline,generic")) {
                        spec2_print_if_insecure("generic retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
                } else if (match_option(arg, ret, "auto")) {
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_AUTO;
disable:
        spec2_print_if_insecure("disabled on command line.");
        return SPECTRE_V2_CMD_NONE;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then there is nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
                /* FALLTHRU */
        case SPECTRE_V2_CMD_AUTO:
                goto retpoline_auto;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!\n");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
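                /*
                 * The AMD retpoline variant uses lfence as the
                 * speculation barrier, which is only safe when lfence
                 * is serializing on this CPU (X86_FEATURE_LFENCE_RDTSC);
                 * otherwise fall back to the generic variant.
                 */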
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor KPTI is available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, where they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Filling RSB on context switch\n");
        }
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
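/*
 * These handlers back the files in
 * /sys/devices/system/cpu/vulnerabilities/.
 */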
ssize_t cpu_show_meltdown(struct sysdev_class *dev,
                          struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_KAISER))
                return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       spectre_v2_bad_module ? " - vulnerable module loaded" : "");
}
#endif