x86/cpu/bugs: Make retpoline module warning conditional
arch/x86/kernel/cpu/bugs.c (pandora-kernel.git)
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);

#ifdef CONFIG_X86_32

static int __init no_halt(char *s)
{
        WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
        boot_cpu_data.hlt_works_ok = 0;
        return 1;
}

__setup("no-hlt", no_halt);

static int __init no_387(char *s)
{
        boot_cpu_data.hard_math = 0;
        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
        return 1;
}

__setup("no387", no_387);

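/*
 * The operands below are the classic test case for the Pentium FDIV bug:
 * on an affected FPU, (x / y) * y does not round-trip back to x.
 */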
static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;

/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
static void __init check_fpu(void)
{
        s32 fdiv_bug;

        if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
                printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
                printk(KERN_EMERG "Giving up.\n");
                for (;;) ;
#endif
                return;
        }

        kernel_fpu_begin();

        /*
         * trap_init() enabled FXSR and company _before_ testing for FP
         * problems here.
         *
         * Test for the divl bug..
         */
        __asm__("fninit\n\t"
                "fldl %1\n\t"
                "fdivl %2\n\t"
                "fmull %2\n\t"
                "fldl %1\n\t"
                "fsubp %%st,%%st(1)\n\t"
                "fistpl %0\n\t"
                "fwait\n\t"
                "fninit"
                : "=m" (*&fdiv_bug)
                : "m" (*&x), "m" (*&y));

        kernel_fpu_end();

        boot_cpu_data.fdiv_bug = fdiv_bug;
        if (boot_cpu_data.fdiv_bug)
                printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
}

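/*
 * Some very early CPUs could hang on HLT; executing it a few times here
 * catches them at boot rather than in the idle loop ("no-hlt" marks the
 * instruction as not working on such parts).
 */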
static void __init check_hlt(void)
{
        if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
                return;

        printk(KERN_INFO "Checking 'hlt' instruction... ");
        if (!boot_cpu_data.hlt_works_ok) {
                printk("disabled\n");
                return;
        }
        halt();
        halt();
        halt();
        halt();
        printk(KERN_CONT "OK.\n");
}

/*
 *      Most 386 processors have a bug where a POPAD can lock the
 *      machine even from user space.
 */

static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
        int res, inp = (int) &res;

        printk(KERN_INFO "Checking for popad bug... ");
        __asm__ __volatile__(
          "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
          : "=&a" (res)
          : "d" (inp)
          : "ecx", "edi");
        /*
         * If this fails, it means that any user program may lock the
         * CPU hard. Too bad.
         */
        if (res != 12345678)
                printk(KERN_CONT "Buggy.\n");
        else
                printk(KERN_CONT "OK.\n");
#endif
}

/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on a i386, we need to be compiled for i386
 *   (due to the lack of "invlpg" and working WP on a i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for a i486.
 */

static void __init check_config(void)
{
/*
 * We'd better not be a i386 if we're configured to use some
 * i486+ only features! (WP works in supervisor mode and the
 * new "invlpg" and "bswap" instructions)
 */
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
        defined(CONFIG_X86_BSWAP)
        if (boot_cpu_data.x86 == 3)
                panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
}

#endif /* CONFIG_X86_32 */

void __init check_bugs(void)
{
#ifdef CONFIG_X86_32
        /*
         * Regardless of whether PCID is enumerated, the SDM says
         * that it can't be enabled in 32-bit mode.
         */
        setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
        check_config();
        check_fpu();
        check_hlt();
        check_popad();
        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

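/* Human-readable mitigation names, indexed by enum spectre_v2_mitigation. */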
static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

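/*
 * Called by the module loader for each module: complain when a module
 * built without retpoline support is loaded into a retpoline-mitigated
 * kernel, and note it for the sysfs spectre_v2 report below.
 */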
bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

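/* True when the kernel itself was built by a retpoline-capable compiler. */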
static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

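/* Compare a length-delimited cmdline token against a NUL-terminated option. */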
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

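/*
 * Map the "spectre_v2=" (and standalone "nospectre_v2") kernel command
 * line options onto a mitigation command, logging the choice when it
 * matters for this CPU.
 */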
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                  sizeof(arg));
        if (ret > 0) {
                if (match_option(arg, ret, "off")) {
                        goto disable;
                } else if (match_option(arg, ret, "on")) {
                        spec2_print_if_secure("force enabled on command line.");
                        return SPECTRE_V2_CMD_FORCE;
                } else if (match_option(arg, ret, "retpoline")) {
                        spec2_print_if_insecure("retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE;
                } else if (match_option(arg, ret, "retpoline,amd")) {
                        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                                return SPECTRE_V2_CMD_AUTO;
                        }
                        spec2_print_if_insecure("AMD retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_AMD;
                } else if (match_option(arg, ret, "retpoline,generic")) {
                        spec2_print_if_insecure("generic retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
                } else if (match_option(arg, ret, "auto")) {
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_AUTO;
disable:
        spec2_print_if_insecure("disabled on command line.");
        return SPECTRE_V2_CMD_NONE;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

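/*
 * Pick the Spectre v2 mitigation mode from the command line and the CPU
 * capabilities, and force the matching feature bits; check_bugs() calls
 * this before alternatives are patched so the choice takes effect.
 */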
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then there is nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
                /* FALLTHRU */
        case SPECTRE_V2_CMD_AUTO:
                goto retpoline_auto;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!\n");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor KPTI is available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, when they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Filling RSB on context switch\n");
        }
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
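/*
 * Back the /sys/devices/system/cpu/vulnerabilities/{meltdown,spectre_v1,
 * spectre_v2} files with the detected state.
 */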
ssize_t cpu_show_meltdown(struct sysdev_class *dev,
                          struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_KAISER))
                return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       spectre_v2_module_string());
}
#endif