/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
10 #include <linux/init.h>
11 #include <linux/utsname.h>
12 #include <linux/cpu.h>
14 #include <asm/nospec-branch.h>
15 #include <asm/cmdline.h>
17 #include <asm/processor.h>
18 #include <asm/processor-flags.h>
21 #include <asm/paravirt.h>
22 #include <asm/alternative.h>
23 #include <asm/pgtable.h>
24 #include <asm/cacheflush.h>
25 #include <asm/intel-family.h>
27 static void __init spectre_v2_select_mitigation(void);
31 static int __init no_halt(char *s)
33 WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
34 boot_cpu_data.hlt_works_ok = 0;
38 __setup("no-hlt", no_halt);
40 static int __init no_387(char *s)
42 boot_cpu_data.hard_math = 0;
43 write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
47 __setup("no387", no_387);
49 static double __initdata x = 4195835.0;
50 static double __initdata y = 3145727.0;
53 * This used to check for exceptions..
54 * However, it turns out that to support that,
55 * the XMM trap handlers basically had to
56 * be buggy. So let's have a correct XMM trap
57 * handler, and forget about printing out
58 * some status at boot.
60 * We should really only care about bugs here
61 * anyway. Not features.
63 static void __init check_fpu(void)
67 if (!boot_cpu_data.hard_math) {
68 #ifndef CONFIG_MATH_EMULATION
69 printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
70 printk(KERN_EMERG "Giving up.\n");
79 * trap_init() enabled FXSR and company _before_ testing for FP
82 * Test for the divl bug..
89 "fsubp %%st,%%st(1)\n\t"
94 : "m" (*&x), "m" (*&y));
98 boot_cpu_data.fdiv_bug = fdiv_bug;
99 if (boot_cpu_data.fdiv_bug)
100 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
103 static void __init check_hlt(void)
105 if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
108 printk(KERN_INFO "Checking 'hlt' instruction... ");
109 if (!boot_cpu_data.hlt_works_ok) {
110 printk("disabled\n");
117 printk(KERN_CONT "OK.\n");
121 * Most 386 processors have a bug where a POPAD can lock the
122 * machine even from user space.
125 static void __init check_popad(void)
127 #ifndef CONFIG_X86_POPAD_OK
128 int res, inp = (int) &res;
130 printk(KERN_INFO "Checking for popad bug... ");
131 __asm__ __volatile__(
132 "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
137 * If this fails, it means that any user program may lock the
141 printk(KERN_CONT "Buggy.\n");
143 printk(KERN_CONT "OK.\n");
148 * Check whether we are able to run this kernel safely on SMP.
150 * - In order to run on a i386, we need to be compiled for i386
151 * (for due to lack of "invlpg" and working WP on a i386)
152 * - In order to run on anything without a TSC, we need to be
153 * compiled for a i486.
156 static void __init check_config(void)
159 * We'd better not be a i386 if we're configured to use some
160 * i486+ only features! (WP works in supervisor mode and the
161 * new "invlpg" and "bswap" instructions)
163 #if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || \
164 defined(CONFIG_X86_BSWAP)
165 if (boot_cpu_data.x86 == 3)
166 panic("Kernel requires i486+ for 'invlpg' and other features");
170 #endif /* CONFIG_X86_32 */
172 void __init check_bugs(void)
176 * Regardless of whether PCID is enumerated, the SDM says
177 * that it can't be enabled in 32-bit mode.
179 setup_clear_cpu_cap(X86_FEATURE_PCID);
184 if (!IS_ENABLED(CONFIG_SMP)) {
186 print_cpu_info(&boot_cpu_data);
189 /* Select the proper spectre mitigation before patching alternatives */
190 spectre_v2_select_mitigation();
197 init_utsname()->machine[1] =
198 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
199 alternative_instructions();
200 #else /* CONFIG_X86_64 */
201 alternative_instructions();
204 * Make sure the first 2MB area is not mapped by huge pages
205 * There are typically fixed size MTRRs in there and overlapping
206 * MTRRs into large pages causes slow downs.
208 * Right now we don't do that with gbpages because there seems
209 * very little benefit for that case.
212 set_memory_4k((unsigned long)__va(0), 1);
/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,			/* spectre_v2=off / nospectre_v2 */
	SPECTRE_V2_CMD_AUTO,			/* default: pick best available */
	SPECTRE_V2_CMD_FORCE,			/* spectre_v2=on */
	SPECTRE_V2_CMD_RETPOLINE,		/* spectre_v2=retpoline */
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,	/* spectre_v2=retpoline,generic */
	SPECTRE_V2_CMD_RETPOLINE_AMD,		/* spectre_v2=retpoline,amd */
};
226 static const char *spectre_v2_strings[] = {
227 [SPECTRE_V2_NONE] = "Vulnerable",
228 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
229 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
230 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
231 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
235 #define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
237 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
239 static void __init spec2_print_if_insecure(const char *reason)
241 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
242 pr_info("%s\n", reason);
245 static void __init spec2_print_if_secure(const char *reason)
247 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
248 pr_info("%s\n", reason);
251 static inline bool retp_compiler(void)
253 return __is_defined(RETPOLINE);
/*
 * Compare a non-NUL-terminated command-line token @arg of length @arglen
 * against the NUL-terminated option name @opt.  Exact match only: the
 * lengths must agree, so "retpoline" does not match "retpoline,amd".
 */
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
263 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
268 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
271 if (match_option(arg, ret, "off")) {
273 } else if (match_option(arg, ret, "on")) {
274 spec2_print_if_secure("force enabled on command line.");
275 return SPECTRE_V2_CMD_FORCE;
276 } else if (match_option(arg, ret, "retpoline")) {
277 spec2_print_if_insecure("retpoline selected on command line.");
278 return SPECTRE_V2_CMD_RETPOLINE;
279 } else if (match_option(arg, ret, "retpoline,amd")) {
280 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
281 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
282 return SPECTRE_V2_CMD_AUTO;
284 spec2_print_if_insecure("AMD retpoline selected on command line.");
285 return SPECTRE_V2_CMD_RETPOLINE_AMD;
286 } else if (match_option(arg, ret, "retpoline,generic")) {
287 spec2_print_if_insecure("generic retpoline selected on command line.");
288 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
289 } else if (match_option(arg, ret, "auto")) {
290 return SPECTRE_V2_CMD_AUTO;
294 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
295 return SPECTRE_V2_CMD_AUTO;
297 spec2_print_if_insecure("disabled on command line.");
298 return SPECTRE_V2_CMD_NONE;
301 /* Check for Skylake-like CPUs (for RSB handling) */
302 static bool __init is_skylake_era(void)
304 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
305 boot_cpu_data.x86 == 6) {
306 switch (boot_cpu_data.x86_model) {
307 case INTEL_FAM6_SKYLAKE_MOBILE:
308 case INTEL_FAM6_SKYLAKE_DESKTOP:
309 case INTEL_FAM6_SKYLAKE_X:
310 case INTEL_FAM6_KABYLAKE_MOBILE:
311 case INTEL_FAM6_KABYLAKE_DESKTOP:
318 static void __init spectre_v2_select_mitigation(void)
320 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
321 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
324 * If the CPU is not affected and the command line mode is NONE or AUTO
325 * then nothing to do.
327 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
328 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
332 case SPECTRE_V2_CMD_NONE:
335 case SPECTRE_V2_CMD_FORCE:
337 case SPECTRE_V2_CMD_AUTO:
340 case SPECTRE_V2_CMD_RETPOLINE_AMD:
341 if (IS_ENABLED(CONFIG_RETPOLINE))
344 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
345 if (IS_ENABLED(CONFIG_RETPOLINE))
346 goto retpoline_generic;
348 case SPECTRE_V2_CMD_RETPOLINE:
349 if (IS_ENABLED(CONFIG_RETPOLINE))
353 pr_err("kernel not compiled with retpoline; no mitigation available!");
357 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
359 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
360 pr_err("LFENCE not serializing. Switching to generic retpoline\n");
361 goto retpoline_generic;
363 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
364 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
365 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
366 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
369 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
370 SPECTRE_V2_RETPOLINE_MINIMAL;
371 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
374 spectre_v2_enabled = mode;
375 pr_info("%s\n", spectre_v2_strings[mode]);
378 * If neither SMEP or KPTI are available, there is a risk of
379 * hitting userspace addresses in the RSB after a context switch
380 * from a shallow call stack to a deeper one. To prevent this fill
381 * the entire RSB, even when using IBRS.
383 * Skylake era CPUs have a separate issue with *underflow* of the
384 * RSB, when they will predict 'ret' targets from the generic BTB.
385 * The proper mitigation for this is IBRS. If IBRS is not supported
386 * or deactivated in favour of retpolines the RSB fill on context
387 * switch is required.
389 if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
390 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
391 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
392 pr_info("Filling RSB on context switch\n");
399 ssize_t cpu_show_meltdown(struct sysdev_class *dev,
400 struct sysdev_class_attribute *attr, char *buf)
402 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
403 return sprintf(buf, "Not affected\n");
404 if (boot_cpu_has(X86_FEATURE_KAISER))
405 return sprintf(buf, "Mitigation: PTI\n");
406 return sprintf(buf, "Vulnerable\n");
409 ssize_t cpu_show_spectre_v1(struct sysdev_class *dev,
410 struct sysdev_class_attribute *attr, char *buf)
412 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
413 return sprintf(buf, "Not affected\n");
414 return sprintf(buf, "Vulnerable\n");
417 ssize_t cpu_show_spectre_v2(struct sysdev_class *dev,
418 struct sysdev_class_attribute *attr, char *buf)
420 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
421 return sprintf(buf, "Not affected\n");
423 return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);