#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * holding that nop's encoding. To get from one nop to the next, we add
 * to the array base an offset equal to the sum of the sizes of all the
 * nops preceding the one we are after.
 *
 * Note: GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
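/*
 * For example, the 1-, 2- and 3-byte nops occupy the first
 * 1 + 2 + 3 = 6 bytes of the array, so the 4-byte nop starts at offset
 * 6 and intel_nops[4] == intelnops + 1 + 2 + 3. An illustrative use
 * (add_nops() below is the real consumer of these tables):
 *
 *	memcpy(buf, ideal_nops[4], 4);	/- copies exactly one 4-byte nop
 */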
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4,
	GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4,
	K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4,
	K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4,
	P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
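/*
 * E.g. with ASM_NOP_MAX == 8, add_nops(buf, 11) emits one 8-byte nop
 * followed by one 3-byte nop, which typically decodes faster than
 * eleven single-byte 0x90 nops.
 */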
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
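/*
 * 0xeb is JMP rel8 (2 bytes total), 0xe9 is JMP rel32 (5 bytes total).
 * For instance, the byte pair "eb fe" is a short jump to itself:
 * displacement -2 relative to the end of the 2-byte instruction.
 */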
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;
	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);
	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;
	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;
	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
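/*
 * Worked example (hypothetical addresses): a replacement JMP
 * "e9 20 00 00 00" at repl_insn = 0x1000 has next_rip = 0x1005 and
 * tgt_rip = 0x1025. Copied to orig_insn = 0x1010, the new displacement
 * is 0x1025 - 0x1010 = 0x15; since 0x15 - 2 <= 127 it shrinks to the
 * 2-byte "eb 13" plus a 3-byte nop, and 0x1010 + 2 + 0x13 = 0x1025
 * still hits the target.
 */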
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}
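/*
 * E.g. an alternatives site padded with four single-byte nops
 * ("90 90 90 90") gets rewritten as one 4-byte ideal nop, such as
 * "0f 1f 40 00" (nopl 0x0(%rax)) when ideal_nops == p6_nops.
 */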
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
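/*
 * Sketch of a producer of the table consumed above (the real macros
 * live in <asm/alternative.h>; X86_FEATURE_XYZ is a placeholder):
 *
 *	asm volatile (ALTERNATIVE("old insns", "new insns",
 *				  X86_FEATURE_XYZ));
 *
 * This emits "old insns" inline, records both variants plus their
 * offsets in the .altinstructions section, and the loop above rewrites
 * the old bytes with the new ones on CPUs that have the feature bit.
 */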
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
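/*
 * Example: LOCK_PREFIX makes "lock; incl (%edi)" assemble to
 * "f0 ff 07" and records the prefix's address in .smp_locks. When only
 * one CPU is possible, the f0 byte is poked to 3e ("ds; incl"), which
 * executes as a plain non-atomic increment; alternatives_smp_lock()
 * flips it back to f0 once more CPUs can come up.
 */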
struct smp_alt_module {
	/* the module owning these lock sites (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}
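/*
 * Sketch of the module-loader call site (simplified; the real one in
 * arch/x86/kernel/module.c locates the ELF sections first):
 *
 *	alternatives_smp_module_add(me, me->name,
 *				    lseg, lseg + locks->sh_size,
 *				    tseg, tseg + text->sh_size);
 */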
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * An older binutils section-handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}
/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
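/*
 * Usage sketch (error code illustrative): kprobes consults this while
 * holding text_mutex before arming a probe, so it never overwrites a
 * byte the SMP-alternatives machinery may still poke:
 *
 *	if (alternatives_text_reserved(p->addr, p->addr))
 *		return -EBUSY;
 */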
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the to-be-patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the range must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
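/*
 * Why two fixmap slots: a poke that starts within the last len-1 bytes
 * of a page straddles into the next one. E.g. patching 4 bytes at
 * page_end - 2 writes 2 bytes through FIX_TEXT_POKE0's mapping and
 * 2 bytes through FIX_TEXT_POKE1's, which works because the two fixmap
 * pages are virtually contiguous.
 */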
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. This allows
 * a user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
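/*
 * Usage sketch (modeled on optimized-kprobes jump patching; the buffer
 * name and 5-byte length are illustrative):
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp(addr, jmp_insn_buf, 5);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 */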
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy task, it is better to aggregate text_poke
 * requests and do them all at once if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}
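/*
 * Usage sketch (NR_SITES and the fill step are illustrative; real batch
 * users populate one text_poke_param per patch site):
 *
 *	struct text_poke_param params[NR_SITES];
 *	// ... fill params[i].addr / .opcode / .len ...
 *	text_poke_smp_batch(params, NR_SITES);
 *
 * so the whole batch pays for a single stop_machine() round instead of
 * one per site.
 */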