x86/retpoline: Fill RSB on context switch for affected CPUs
[pandora-kernel.git] arch/x86/kernel/cpu/bugs.c
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);

#ifdef CONFIG_X86_32

static int __init no_halt(char *s)
{
        WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
        boot_cpu_data.hlt_works_ok = 0;
        return 1;
}

__setup("no-hlt", no_halt);

static int __init no_387(char *s)
{
        boot_cpu_data.hard_math = 0;
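        /* Set EM, MP and TS in CR0 so any FPU instruction traps from here on. */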
        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
        return 1;
}

__setup("no387", no_387);

static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;

/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
static void __init check_fpu(void)
{
        s32 fdiv_bug;

        if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
                printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
                printk(KERN_EMERG "Giving up.\n");
                for (;;) ;
#endif
                return;
        }

        kernel_fpu_begin();

        /*
         * trap_init() enabled FXSR and company _before_ testing for FP
         * problems here.
         *
         * Test for the FDIV bug..
         */
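        /*
         * The sequence below computes (x/y)*y - x with the classic test
         * operands. A correct FPU yields 0; the buggy Pentium FDIV unit
         * yields a nonzero value, which fistpl stores into fdiv_bug.
         */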
        __asm__("fninit\n\t"
                "fldl %1\n\t"
                "fdivl %2\n\t"
                "fmull %2\n\t"
                "fldl %1\n\t"
                "fsubp %%st,%%st(1)\n\t"
                "fistpl %0\n\t"
                "fwait\n\t"
                "fninit"
                : "=m" (*&fdiv_bug)
                : "m" (*&x), "m" (*&y));

        kernel_fpu_end();

        boot_cpu_data.fdiv_bug = fdiv_bug;
        if (boot_cpu_data.fdiv_bug)
                printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
}

static void __init check_hlt(void)
{
        if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
                return;

        printk(KERN_INFO "Checking 'hlt' instruction... ");
        if (!boot_cpu_data.hlt_works_ok) {
                printk(KERN_CONT "disabled\n");
                return;
        }
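        /*
         * Actually execute 'hlt' a few times; on a CPU where the
         * instruction is broken the machine simply locks up here.
         */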
        halt();
        halt();
        halt();
        halt();
        printk(KERN_CONT "OK.\n");
}

/*
 *      Most 386 processors have a bug where a POPAD can lock the
 *      machine even from user space.
 */

static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
        int res, inp = (int) &res;

        printk(KERN_INFO "Checking for popad bug... ");
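        /*
         * Load a marker into %eax, run pusha/popa, and follow popa
         * immediately with a base+index memory operand. On an affected
         * 386 this sequence corrupts %eax, so check whether the marker
         * survived.
         */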
        __asm__ __volatile__(
          "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
          : "=&a" (res)
          : "d" (inp)
          : "ecx", "edi");
        /*
         * If this fails, it means that any user program may lock the
         * CPU hard. Too bad.
         */
        if (res != 12345678)
                printk(KERN_CONT "Buggy.\n");
        else
                printk(KERN_CONT "OK.\n");
#endif
}

/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on a i386, we need to be compiled for i386
 *   (due to the lack of "invlpg" and working WP on a i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for a i486.
 */

static void __init check_config(void)
{
/*
 * We'd better not be a i386 if we're configured to use some
 * i486+ only features! (WP works in supervisor mode and the
 * new "invlpg" and "bswap" instructions)
 */
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
        defined(CONFIG_X86_BSWAP)
        if (boot_cpu_data.x86 == 3)
                panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
}

#endif /* CONFIG_X86_32 */

void __init check_bugs(void)
{
#ifdef CONFIG_X86_32
        /*
         * Regardless of whether PCID is enumerated, the SDM says
         * that it can't be enabled in 32-bit mode.
         */
        setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
        check_config();
        check_fpu();
        check_hlt();
        check_popad();
        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

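/* Human-readable mitigation state, indexed by enum spectre_v2_mitigation. */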
static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

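/* Print the reason only when the CPU actually has the Spectre v2 bug. */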
static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

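/* Print the reason only when the CPU does not have the Spectre v2 bug. */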
static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

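/* True when the kernel itself was built with a retpoline-capable compiler. */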
static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

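/* Compare a length-delimited cmdline argument against a known option. */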
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

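/*
 * Parse "spectre_v2=" from the boot command line. The recognized
 * values, matching the cases below, are "off", "on", "retpoline",
 * "retpoline,amd", "retpoline,generic" and "auto"; "nospectre_v2"
 * is equivalent to "spectre_v2=off".
 */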
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                  sizeof(arg));
        if (ret > 0) {
                if (match_option(arg, ret, "off")) {
                        goto disable;
                } else if (match_option(arg, ret, "on")) {
                        spec2_print_if_secure("force enabled on command line.");
                        return SPECTRE_V2_CMD_FORCE;
                } else if (match_option(arg, ret, "retpoline")) {
                        spec2_print_if_insecure("retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE;
                } else if (match_option(arg, ret, "retpoline,amd")) {
                        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                                return SPECTRE_V2_CMD_AUTO;
                        }
                        spec2_print_if_insecure("AMD retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_AMD;
                } else if (match_option(arg, ret, "retpoline,generic")) {
                        spec2_print_if_insecure("generic retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
                } else if (match_option(arg, ret, "auto")) {
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_AUTO;
disable:
        spec2_print_if_insecure("disabled on command line.");
        return SPECTRE_V2_CMD_NONE;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
                /* FALLTHRU */
        case SPECTRE_V2_CMD_AUTO:
                goto retpoline_auto;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!\n");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor KPTI is available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this,
         * fill the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, when they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Filling RSB on context switch\n");
        }
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
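/*
 * sysfs show handlers backing the CPU vulnerability reporting files
 * (meltdown, spectre_v1, spectre_v2).
 */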
ssize_t cpu_show_meltdown(struct sysdev_class *dev,
                          struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_KAISER))
                return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
                            struct sysdev_class_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
}
#endif