/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
10 #include <linux/init.h>
11 #include <linux/utsname.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
15 #include <asm/nospec-branch.h>
16 #include <asm/cmdline.h>
18 #include <asm/processor.h>
19 #include <asm/processor-flags.h>
22 #include <asm/paravirt.h>
23 #include <asm/alternative.h>
24 #include <asm/pgtable.h>
25 #include <asm/cacheflush.h>
26 #include <asm/intel-family.h>
28 static void __init spectre_v2_select_mitigation(void);
/*
 * "no-hlt" boot parameter (deprecated in favour of "idle=poll"):
 * mark the HLT instruction as not working so the idle loop avoids it.
 * NOTE(review): braces and the return statement are missing from this
 * extraction — verify against the full source.
 */
32 static int __init no_halt(char *s)
34 WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
35 boot_cpu_data.hlt_works_ok = 0;
39 __setup("no-hlt", no_halt);
/*
 * "no387" boot parameter: disable the hardware FPU by clearing
 * hard_math and setting CR0.TS/EM/MP so FPU instructions trap.
 * NOTE(review): braces and the return statement are missing from this
 * extraction — verify against the full source.
 */
41 static int __init no_387(char *s)
43 boot_cpu_data.hard_math = 0;
44 write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
48 __setup("no387", no_387);
/*
 * Operands used by check_fpu() below to probe for the Pentium FDIV
 * bug; the result of the x/y computation sets boot_cpu_data.fdiv_bug.
 */
50 static double __initdata x = 4195835.0;
51 static double __initdata y = 3145727.0;
/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
/*
 * check_fpu() - verify an FPU is usable and test for the Pentium FDIV
 * bug using the x/y constants above; the result is recorded in
 * boot_cpu_data.fdiv_bug.  Without CONFIG_MATH_EMULATION, a missing
 * coprocessor is fatal at boot.
 * NOTE(review): the fdiv_bug declaration and most of the inline asm
 * body are missing from this extraction — do not edit without the
 * full source.
 */
64 static void __init check_fpu(void)
68 if (!boot_cpu_data.hard_math) {
69 #ifndef CONFIG_MATH_EMULATION
70 printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
71 printk(KERN_EMERG "Giving up.\n");
80 * trap_init() enabled FXSR and company _before_ testing for FP
83 * Test for the divl bug..
90 "fsubp %%st,%%st(1)\n\t"
95 : "m" (*&x), "m" (*&y));
99 boot_cpu_data.fdiv_bug = fdiv_bug;
100 if (boot_cpu_data.fdiv_bug)
101 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
/*
 * check_hlt() - sanity-check the HLT instruction on very old CPUs.
 * Skipped on family >= 5 (Pentium and later) and under paravirt,
 * where HLT is assumed to work.
 * NOTE(review): braces and the early return are missing from this
 * extraction.
 */
104 static void __init check_hlt(void)
106 if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
109 printk(KERN_INFO "Checking 'hlt' instruction... ");
110 if (!boot_cpu_data.hlt_works_ok) {
111 printk("disabled\n");
118 printk(KERN_CONT "OK.\n");
/*
 * Most 386 processors have a bug where a POPAD can lock the
 * machine even from user space.
 */
/*
 * check_popad() - detect the 386 POPAD lockup bug (unless the config
 * asserts CONFIG_X86_POPAD_OK).  Executes a pusha/popa sequence and
 * checks whether %eax survived.
 * NOTE(review): parts of the asm statement and the result check are
 * missing from this extraction.
 */
126 static void __init check_popad(void)
128 #ifndef CONFIG_X86_POPAD_OK
129 int res, inp = (int) &res;
131 printk(KERN_INFO "Checking for popad bug... ");
132 __asm__ __volatile__(
133 "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
138 * If this fails, it means that any user program may lock the
142 printk(KERN_CONT "Buggy.\n");
144 printk(KERN_CONT "OK.\n");
/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on a i386, we need to be compiled for i386
 *   (due to lack of "invlpg" and working WP on a i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for a i486.
 */
/*
 * check_config() - panic if the kernel was configured for i486+ only
 * features (WP in supervisor mode, invlpg, bswap) but is running on
 * an actual i386 (family == 3).
 * NOTE(review): the surrounding braces and the matching #endif for the
 * feature check are missing from this extraction; the trailing
 * "#endif CONFIG_X86_32" closes a guard whose opening is not visible.
 */
157 static void __init check_config(void)
160 * We'd better not be a i386 if we're configured to use some
161 * i486+ only features! (WP works in supervisor mode and the
162 * new "invlpg" and "bswap" instructions)
164 #if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
165 defined(CONFIG_X86_BSWAP)
166 if (boot_cpu_data.x86 == 3)
167 panic("Kernel requires i486+ for 'invlpg' and other features");
171 #endif /* CONFIG_X86_32 */
/*
 * check_bugs() - boot-time CPU bug checks and mitigation selection.
 * The Spectre v2 mitigation is selected BEFORE
 * alternative_instructions() so the retpoline alternatives are
 * patched with the chosen mode.  On 32-bit, PCID is force-cleared
 * (cannot be enabled in 32-bit mode per the SDM); on 64-bit the first
 * 2MB is remapped with 4k pages to avoid MTRR/huge-page overlap
 * slowdowns.
 * NOTE(review): the #ifdef CONFIG_X86_32/64 structure is partially
 * missing here (an "#else" appears without its opening "#if"), as are
 * the calls to the check_* helpers above — verify against the full
 * source.
 */
173 void __init check_bugs(void)
177 * Regardless of whether PCID is enumerated, the SDM says
178 * that it can't be enabled in 32-bit mode.
180 setup_clear_cpu_cap(X86_FEATURE_PCID);
185 if (!IS_ENABLED(CONFIG_SMP)) {
187 print_cpu_info(&boot_cpu_data);
190 /* Select the proper spectre mitigation before patching alternatives */
191 spectre_v2_select_mitigation();
198 init_utsname()->machine[1] =
199 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
200 alternative_instructions();
201 #else /* CONFIG_X86_64 */
202 alternative_instructions();
205 * Make sure the first 2MB area is not mapped by huge pages
206 * There are typically fixed size MTRRs in there and overlapping
207 * MTRRs into large pages causes slow downs.
209 * Right now we don't do that with gbpages because there seems
210 * very little benefit for that case.
213 set_memory_4k((unsigned long)__va(0), 1);
/*
 * The kernel command line selection for spectre_v2=.
 * Fix: SPECTRE_V2_CMD_NONE and SPECTRE_V2_CMD_AUTO are returned by
 * spectre_v2_parse_cmdline() and tested in
 * spectre_v2_select_mitigation() but were missing from the visible
 * enumerator list; restored in the canonical order (NONE = 0).
 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};
/*
 * Human-readable mitigation names, indexed by enum
 * spectre_v2_mitigation (declared in asm/nospec-branch.h); reported
 * to userspace by cpu_show_spectre_v2().
 * NOTE(review): the closing "};" is missing from this extraction.
 */
227 static const char *spectre_v2_strings[] = {
228 [SPECTRE_V2_NONE] = "Vulnerable",
229 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
230 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
231 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
232 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
/* Prefix all pr_* output below with "Spectre V2 : ". */
236 #define pr_fmt(fmt) "Spectre V2 : " fmt
/* Currently selected Spectre v2 mitigation; reported via sysfs. */
238 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
/* Set by retpoline_module_ok() when a module without retpoline support loads. */
241 static bool spectre_v2_bad_module;
243 bool retpoline_module_ok(bool has_retpoline)
245 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
248 pr_err("System may be vunerable to spectre v2\n");
249 spectre_v2_bad_module = true;
/*
 * Suffix for the sysfs spectre_v2 report when a non-retpoline module
 * has been loaded.  Two variants are visible; presumably the first is
 * under #ifdef CONFIG_MODULES and the second is the #else stub — the
 * preprocessor lines are missing from this extraction, verify against
 * the full source.
 */
253 static inline const char *spectre_v2_module_string(void)
255 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
258 static inline const char *spectre_v2_module_string(void) { return ""; }
261 static void __init spec2_print_if_insecure(const char *reason)
263 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
264 pr_info("%s\n", reason);
267 static void __init spec2_print_if_secure(const char *reason)
269 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
270 pr_info("%s\n", reason);
273 static inline bool retp_compiler(void)
275 return __is_defined(RETPOLINE);
/*
 * Compare a command-line token @arg of length @arglen (not necessarily
 * NUL-terminated) against the NUL-terminated option name @opt.
 * Returns true only on an exact, full-length match.
 * Fix: restored the function braces lost in extraction.
 */
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
/*
 * Parse "spectre_v2=" (off|on|retpoline|retpoline,amd|
 * retpoline,generic|auto) and the "nospectre_v2" switch from the boot
 * command line and map them to a spectre_v2_mitigation_cmd.
 * "retpoline,amd" falls back to AUTO on non-AMD CPUs.  When no option
 * is given, AUTO is returned unless "nospectre_v2" is present.
 * NOTE(review): the arg[] buffer and ret declarations, several braces
 * and the "off" early-exit path are missing from this extraction —
 * verify against the full source.
 */
285 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
290 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
293 if (match_option(arg, ret, "off")) {
295 } else if (match_option(arg, ret, "on")) {
296 spec2_print_if_secure("force enabled on command line.");
297 return SPECTRE_V2_CMD_FORCE;
298 } else if (match_option(arg, ret, "retpoline")) {
299 spec2_print_if_insecure("retpoline selected on command line.");
300 return SPECTRE_V2_CMD_RETPOLINE;
301 } else if (match_option(arg, ret, "retpoline,amd")) {
302 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
303 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
304 return SPECTRE_V2_CMD_AUTO;
306 spec2_print_if_insecure("AMD retpoline selected on command line.");
307 return SPECTRE_V2_CMD_RETPOLINE_AMD;
308 } else if (match_option(arg, ret, "retpoline,generic")) {
309 spec2_print_if_insecure("generic retpoline selected on command line.");
310 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
311 } else if (match_option(arg, ret, "auto")) {
312 return SPECTRE_V2_CMD_AUTO;
316 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
317 return SPECTRE_V2_CMD_AUTO;
319 spec2_print_if_insecure("disabled on command line.");
320 return SPECTRE_V2_CMD_NONE;
323 /* Check for Skylake-like CPUs (for RSB handling) */
/*
 * True for Intel family-6 Skylake/Kabylake models, which can
 * speculate 'ret' targets from the BTB on RSB underflow and therefore
 * need RSB filling on context switch (see
 * spectre_v2_select_mitigation()).
 * NOTE(review): the "return true" / default "return false" paths and
 * closing braces are missing from this extraction.
 */
324 static bool __init is_skylake_era(void)
326 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
327 boot_cpu_data.x86 == 6) {
328 switch (boot_cpu_data.x86_model) {
329 case INTEL_FAM6_SKYLAKE_MOBILE:
330 case INTEL_FAM6_SKYLAKE_DESKTOP:
331 case INTEL_FAM6_SKYLAKE_X:
332 case INTEL_FAM6_KABYLAKE_MOBILE:
333 case INTEL_FAM6_KABYLAKE_DESKTOP:
/*
 * Choose and enable the Spectre v2 mitigation from the command line
 * (spectre_v2_parse_cmdline()), CPU vendor and CONFIG_RETPOLINE.
 * AMD CPUs get the lighter LFENCE-based retpoline when LFENCE is
 * serializing, otherwise the generic retpoline; "Full" vs "Minimal"
 * depends on retp_compiler().  Finally decides whether the RSB must
 * be refilled on context switch: when neither KAISER (KPTI) nor SMEP
 * is available, or on Skylake-era CPUs (RSB underflow falls back to
 * the BTB).
 * NOTE(review): the switch's goto targets, the "retpoline_auto:" /
 * "retpoline_generic:" labels, fallthrough breaks and several closing
 * braces are missing from this extraction — do not edit without the
 * full source.
 */
340 static void __init spectre_v2_select_mitigation(void)
342 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
343 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
346 * If the CPU is not affected and the command line mode is NONE or AUTO
347 * then nothing to do.
349 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
350 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
354 case SPECTRE_V2_CMD_NONE:
357 case SPECTRE_V2_CMD_FORCE:
358 case SPECTRE_V2_CMD_AUTO:
359 if (IS_ENABLED(CONFIG_RETPOLINE))
362 case SPECTRE_V2_CMD_RETPOLINE_AMD:
363 if (IS_ENABLED(CONFIG_RETPOLINE))
366 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
367 if (IS_ENABLED(CONFIG_RETPOLINE))
368 goto retpoline_generic;
370 case SPECTRE_V2_CMD_RETPOLINE:
371 if (IS_ENABLED(CONFIG_RETPOLINE))
375 pr_err("kernel not compiled with retpoline; no mitigation available!");
379 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
381 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
382 pr_err("LFENCE not serializing. Switching to generic retpoline\n");
383 goto retpoline_generic;
385 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
386 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
387 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
388 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
391 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
392 SPECTRE_V2_RETPOLINE_MINIMAL;
393 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
396 spectre_v2_enabled = mode;
397 pr_info("%s\n", spectre_v2_strings[mode]);
400 * If neither SMEP or KPTI are available, there is a risk of
401 * hitting userspace addresses in the RSB after a context switch
402 * from a shallow call stack to a deeper one. To prevent this fill
403 * the entire RSB, even when using IBRS.
405 * Skylake era CPUs have a separate issue with *underflow* of the
406 * RSB, when they will predict 'ret' targets from the generic BTB.
407 * The proper mitigation for this is IBRS. If IBRS is not supported
408 * or deactivated in favour of retpolines the RSB fill on context
409 * switch is required.
411 if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
412 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
413 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
414 pr_info("Filling RSB on context switch\n");
421 ssize_t cpu_show_meltdown(struct sysdev_class *dev,
422 struct sysdev_class_attribute *attr, char *buf)
424 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
425 return sprintf(buf, "Not affected\n");
426 if (boot_cpu_has(X86_FEATURE_KAISER))
427 return sprintf(buf, "Mitigation: PTI\n");
428 return sprintf(buf, "Vulnerable\n");
431 ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
432 struct sysdev_class_attribute *attr, char *buf)
434 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
435 return sprintf(buf, "Not affected\n");
436 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
439 ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
440 struct sysdev_class_attribute *attr, char *buf)
442 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
443 return sprintf(buf, "Not affected\n");
445 return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
446 spectre_v2_module_string());