x86/spectre: Add boot time option to select Spectre v2 mitigation
[pandora-kernel.git] / arch/x86/kernel/cpu/bugs.c
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static void __init spectre_v2_select_mitigation(void);

#ifdef CONFIG_X86_32

static int __init no_halt(char *s)
{
        WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
        boot_cpu_data.hlt_works_ok = 0;
        return 1;
}

__setup("no-hlt", no_halt);

static int __init no_387(char *s)
{
        boot_cpu_data.hard_math = 0;
        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
        return 1;
}

__setup("no387", no_387);

static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;

/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
static void __init check_fpu(void)
{
        s32 fdiv_bug;

        if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
                printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
                printk(KERN_EMERG "Giving up.\n");
                for (;;) ;
#endif
                return;
        }

        kernel_fpu_begin();

        /*
         * trap_init() enabled FXSR and company _before_ testing for FP
         * problems here.
         *
         * Test for the divl bug..
         */
        __asm__("fninit\n\t"
                "fldl %1\n\t"
                "fdivl %2\n\t"
                "fmull %2\n\t"
                "fldl %1\n\t"
                "fsubp %%st,%%st(1)\n\t"
                "fistpl %0\n\t"
                "fwait\n\t"
                "fninit"
                : "=m" (*&fdiv_bug)
                : "m" (*&x), "m" (*&y));
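        /*
         * How the sequence above works: x/y is computed, multiplied back by
         * y, and the result is subtracted from a fresh copy of x; the
         * difference is stored as an integer in fdiv_bug.  A correct FPU
         * yields exactly 0, while the original Pentium FDIV flaw returns a
         * slightly wrong quotient for 4195835/3145727, so the difference
         * comes out nonzero and the bug is flagged below.
         */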

        kernel_fpu_end();

        boot_cpu_data.fdiv_bug = fdiv_bug;
        if (boot_cpu_data.fdiv_bug)
                printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
}

static void __init check_hlt(void)
{
        if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
                return;

        printk(KERN_INFO "Checking 'hlt' instruction... ");
        if (!boot_cpu_data.hlt_works_ok) {
                printk("disabled\n");
                return;
        }
        halt();
        halt();
        halt();
        halt();
        printk(KERN_CONT "OK.\n");
}

/*
 *      Most 386 processors have a bug where a POPAD can lock the
 *      machine even from user space.
 */

static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
        int res, inp = (int) &res;

        printk(KERN_INFO "Checking for popad bug... ");
        __asm__ __volatile__(
          "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
          : "=&a" (res)
          : "d" (inp)
          : "ecx", "edi");
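        /*
         * The sequence above loads a known value into %eax, runs pusha/popa,
         * and then immediately executes an instruction using base+index
         * addressing.  On an affected 386 that combination corrupts %eax (or
         * can wedge the CPU outright), so a changed value in res identifies
         * a buggy part.
         */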
        /*
         * If this fails, it means that any user program may lock the
         * CPU hard. Too bad.
         */
        if (res != 12345678)
                printk(KERN_CONT "Buggy.\n");
        else
                printk(KERN_CONT "OK.\n");
#endif
}

/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on an i386, we need to be compiled for i386
 *   (due to the lack of "invlpg" and working WP on an i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for an i486.
 */

static void __init check_config(void)
{
/*
 * We'd better not be an i386 if we're configured to use some
 * i486+ only features! (WP works in supervisor mode and the
 * new "invlpg" and "bswap" instructions)
 */
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
        defined(CONFIG_X86_BSWAP)
        if (boot_cpu_data.x86 == 3)
                panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
}

#endif /* CONFIG_X86_32 */

void __init check_bugs(void)
{
#ifdef CONFIG_X86_32
        /*
         * Regardless of whether PCID is enumerated, the SDM says
         * that it can't be enabled in 32-bit mode.
         */
        setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
        check_config();
        check_fpu();
        check_hlt();
        check_popad();
        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};
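
/*
 * These values are selected by the spectre_v2= boot parameter parsed in
 * spectre_v2_parse_cmdline() below, e.g.:
 *
 *   spectre_v2=off                -> SPECTRE_V2_CMD_NONE (also "nospectre_v2")
 *   spectre_v2=auto               -> SPECTRE_V2_CMD_AUTO (the default)
 *   spectre_v2=on                 -> SPECTRE_V2_CMD_FORCE
 *   spectre_v2=retpoline          -> SPECTRE_V2_CMD_RETPOLINE
 *   spectre_v2=retpoline,generic  -> SPECTRE_V2_CMD_RETPOLINE_GENERIC
 *   spectre_v2=retpoline,amd      -> SPECTRE_V2_CMD_RETPOLINE_AMD
 */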

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};
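
/*
 * The "minimal" variants cover only the assembly thunks provided by the
 * kernel itself; indirect branches emitted by a compiler without retpoline
 * support remain unprotected, which is why they are still reported as
 * "Vulnerable" above.
 */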

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}
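
/*
 * RETPOLINE is expected to be defined only when the kernel was built with a
 * retpoline-capable compiler, so retp_compiler() distinguishes a full
 * retpoline build from the minimal ASM-only fallback selected below.
 */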

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
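
/*
 * Note that match_option() requires both the length and the content to
 * match, so in the parsing below "retpoline" selects only the plain
 * retpoline command and never "retpoline,amd" or "retpoline,generic",
 * even though it is a prefix of both.
 */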

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                  sizeof(arg));
        if (ret > 0) {
                if (match_option(arg, ret, "off")) {
                        goto disable;
                } else if (match_option(arg, ret, "on")) {
                        spec2_print_if_secure("force enabled on command line.");
                        return SPECTRE_V2_CMD_FORCE;
                } else if (match_option(arg, ret, "retpoline")) {
                        spec2_print_if_insecure("retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE;
                } else if (match_option(arg, ret, "retpoline,amd")) {
                        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                                return SPECTRE_V2_CMD_AUTO;
                        }
                        spec2_print_if_insecure("AMD retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_AMD;
                } else if (match_option(arg, ret, "retpoline,generic")) {
                        spec2_print_if_insecure("generic retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
                } else if (match_option(arg, ret, "auto")) {
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_AUTO;
disable:
        spec2_print_if_insecure("disabled on command line.");
        return SPECTRE_V2_CMD_NONE;
}

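/*
 * Mitigation selection: with "auto" or "on" (and for the explicit retpoline
 * commands when CONFIG_RETPOLINE is enabled) a retpoline flavour is chosen.
 * AMD CPUs use the AMD variant only if LFENCE is serializing; otherwise, and
 * on all other vendors, the generic retpoline is used.  The "full"
 * compiler-based variant is reported when the kernel was built with a
 * retpoline-aware compiler, the "minimal" ASM-only variant otherwise.
 */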
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then there is nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
                /* FALLTHRU */
        case SPECTRE_V2_CMD_AUTO:
                goto retpoline_auto;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!\n");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
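/*
 * sysfs reporting of the mitigation state.  These handlers back the
 * per-vulnerability CPU attributes (meltdown, spectre_v1, spectre_v2),
 * presumably registered from drivers/base/cpu.c in this tree; each prints
 * either "Not affected", "Vulnerable", or the active "Mitigation: ..." string.
 */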
ssize_t cpu_show_meltdown(struct sysdev_class *dev,
                          struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_KAISER))
                return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
}
#endif /* CONFIG_SYSFS */