#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);
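/*
 * Usage note (illustrative, not from the original source): booting with
 *
 *      linux ... debug-alternative
 *
 * on the kernel command line sets debug_alternative, which makes the
 * DPRINTK()/DUMP_BYTES() helpers below emit KERN_DEBUG output.
 */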
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...)                                           \
do {                                                                    \
        if (debug_alternative)                                          \
                printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);   \
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)                              \
do {                                                                    \
        if (unlikely(debug_alternative)) {                              \
                int j;                                                  \
                if (!(len))                                             \
                        break;                                          \
                printk(KERN_DEBUG fmt, ##args);                         \
                for (j = 0; j < (len) - 1; j++)                         \
                        printk(KERN_CONT "%02hhx ", buf[j]);            \
                printk(KERN_CONT "%02hhx\n", buf[j]);                   \
        }                                                               \
} while (0)
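/*
 * Illustrative output (address made up): with debug_alternative set,
 * DUMP_BYTES(insnbuf, 5, "%p: final_insn: ", instr) prints e.g.
 *
 *      ffffffff81001234: final_insn: 0f 1f 44 00 00
 */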
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
        GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4,
        GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8,
        GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
        K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4,
        K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8,
        K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
        K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4,
        K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8,
        K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
        P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4,
        P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8,
        P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * Due to a decoder implementation quirk, some
                 * specific Intel CPUs actually perform better with
                 * the "k8_nops" than with the SDM-recommended NOPs.
                 */
                if (boot_cpu_data.x86 == 6 &&
                    boot_cpu_data.x86_model >= 0x0f &&
                    boot_cpu_data.x86_model != 0x1c &&
                    boot_cpu_data.x86_model != 0x26 &&
                    boot_cpu_data.x86_model != 0x27 &&
                    boot_cpu_data.x86_model < 0x30) {
                        ideal_nops = k8_nops;
                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
                        ideal_nops = p6_nops;
                } else {
#ifdef CONFIG_X86_64
                        ideal_nops = k8_nops;
#else
                        ideal_nops = intel_nops;
#endif
                }
                break;
        default:
#ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
#else
                if (boot_cpu_has(X86_FEATURE_K8))
                        ideal_nops = k8_nops;
                else if (boot_cpu_has(X86_FEATURE_K7))
                        ideal_nops = k7_nops;
                else
                        ideal_nops = intel_nops;
#endif
        }
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, ideal_nops[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
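/*
 * Example (illustrative only): with ASM_NOP_MAX == 8, add_nops(buf, 11)
 * emits one 8-byte ideal NOP followed by one 3-byte ideal NOP, i.e. a
 * padding hole is always covered by the fewest, longest NOPs available.
 */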
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
        return opcode == 0xeb || opcode == 0xe9;
}
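/*
 * For reference: 0xeb is JMP rel8 (a 2-byte instruction) and 0xe9 is
 * JMP rel32 (a 5-byte instruction) in the x86 encoding.
 */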
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
        u8 *next_rip, *tgt_rip;
        s32 n_dspl, o_dspl;
        int repl_len;

        if (a->replacementlen != 5)
                return;

        o_dspl = *(s32 *)(insnbuf + 1);

        /* next_rip of the replacement JMP */
        next_rip = repl_insn + a->replacementlen;
        /* target rip of the replacement JMP */
        tgt_rip = next_rip + o_dspl;
        n_dspl = tgt_rip - orig_insn;

        DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

        if (tgt_rip - orig_insn >= 0) {
                if (n_dspl - 2 <= 127)
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        /* negative offset */
        } else {
                if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        }

two_byte_jmp:
        n_dspl -= 2;
        insnbuf[0] = 0xeb;
        insnbuf[1] = (s8)n_dspl;
        add_nops(insnbuf + 2, 3);
        repl_len = 2;
        goto done;

five_byte_jmp:
        n_dspl -= 5;
        insnbuf[0] = 0xe9;
        *(s32 *)&insnbuf[1] = n_dspl;
        repl_len = 5;

done:
        DPRINTK("final displ: 0x%08x, JMP 0x%lx",
                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
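/*
 * Worked example with made-up addresses: a 5-byte replacement JMP at
 * 0x100 (next_rip == 0x105) targeting 0x120 has o_dspl == 0x1b. If the
 * original instruction sits at 0x110, then n_dspl == 0x120 - 0x110 ==
 * 0x10; since 0x10 - 2 <= 127 the JMP shrinks to the two-byte form
 * "eb 0e" plus a 3-byte NOP, and indeed 0x110 + 0x0e + 2 == 0x120.
 */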
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
        add_nops(instr + (a->instrlen - a->padlen), a->padlen);

        DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
                   instr, a->instrlen - a->padlen, a->padlen);
}
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        u8 *instr, *replacement;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("alt table %p -> %p", start, end);
        /*
         * The scan order should be from start to end. A later scanned
         * alternative code can overwrite previously scanned alternative code.
         * Some kernel functions (e.g. memcpy, memset, etc) use this order to
         * patch code.
         *
         * So be careful if you want to change the scan order to any other
         * order.
         */
        for (a = start; a < end; a++) {
                int insnbuf_sz = 0;

                instr = (u8 *)&a->instr_offset + a->instr_offset;
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->instrlen > sizeof(insnbuf));
                BUG_ON(a->cpuid >= NCAPINTS*32);
                if (!boot_cpu_has(a->cpuid)) {
                        if (a->padlen > 1)
                                optimize_nops(a, instr);

                        continue;
                }

                DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d)",
                        a->cpuid >> 5,
                        a->cpuid & 0x1f,
                        instr, a->instrlen,
                        replacement, a->replacementlen);

                DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
                DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

                memcpy(insnbuf, replacement, a->replacementlen);
                insnbuf_sz = a->replacementlen;

                /* 0xe8 is a relative CALL; fix the offset. */
                if (*insnbuf == 0xe8 && a->replacementlen == 5) {
                        *(s32 *)(insnbuf + 1) += replacement - instr;
                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
                                *(s32 *)(insnbuf + 1),
                                (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
                }

                if (a->replacementlen && is_jmp(replacement[0]))
                        recompute_jump(a, instr, replacement, insnbuf);

                if (a->instrlen > a->replacementlen) {
                        add_nops(insnbuf + a->replacementlen,
                                 a->instrlen - a->replacementlen);
                        insnbuf_sz += a->instrlen - a->replacementlen;
                }

                DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

                text_poke_early(instr, insnbuf, insnbuf_sz);
        }
}
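/*
 * For context, a real-world style use of this machinery (a sketch, not
 * from this file): the alternative() macro in <asm/alternative.h>
 * records an entry in .altinstructions that apply_alternatives() later
 * consumes. On 32-bit x86 the memory barrier is defined roughly as
 *
 *      #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *                               X86_FEATURE_XMM2)
 *
 * so CPUs with SSE2 get the slow lock-based barrier patched into a
 * single mfence at boot.
 */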
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
                                  u8 *text, u8 *text_end)
{
        const s32 *poff;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                if (*ptr == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
                                    u8 *text, u8 *text_end)
{
        const s32 *poff;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                if (*ptr == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}
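/*
 * Background (paraphrasing <asm/alternative.h>, not verbatim): the
 * LOCK_PREFIX macro emits the 0xf0 lock byte and records its address
 * in the .smp_locks section, which is what produces the start/end
 * offset lists walked by the two functions above.
 */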
struct smp_alt_module {
        /* the module owning these fixups (NULL for the core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        const s32       *locks;
        const s32       *locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;    /* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text, void *text_end)
{
        struct smp_alt_module *smp;

        mutex_lock(&smp_alt);
        if (!uniproc_patched)
                goto unlock;

        if (num_possible_cpus() == 1)
                /* Don't bother remembering, we'll never have to undo it. */
                goto smp_unlock;

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                /* we'll run the (safe but slow) SMP code then ... */
                goto unlock;

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("locks %p -> %p, text %p -> %p, name %s",
                smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
        alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
        mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                kfree(item);
                break;
        }
        mutex_unlock(&smp_alt);
}
void alternatives_enable_smp(void)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * Older binutils section handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk("lockdep: fixing up alternatives.\n");
#endif

        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);

        mutex_lock(&smp_alt);

        if (uniproc_patched) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                BUG_ON(num_online_cpus() != 1);
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
                uniproc_patched = false;
        }
        mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for SMP-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        const s32 *poff;
        u8 *text_start = start;
        u8 *text_end = end;

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (poff = mod->locks; poff < mod->locks_end; poff++) {
                        const u8 *ptr = (const u8 *)poff + *poff;

                        if (text_start <= ptr && text_end > ptr)
                                return 1;
                }
        }

        return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the code being patched.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
        /* Patch to UP if other cpus not imminent. */
        if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
                uniproc_patched = true;
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
        }

        if (!uniproc_patched || num_possible_cpus() == 1)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);
#endif

        apply_paravirt(__parainstructions, __parainstructions_end);

        restart_nmi();
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                       size_t len)
{
        unsigned long flags;

        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}
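/*
 * Illustrative call (boot-time only, values hypothetical): replace a
 * 5-byte instruction in place before any other CPU is running:
 *
 *      text_poke_early(instr, insnbuf, 5);
 *
 * This is how apply_alternatives() above writes its patched buffer.
 */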
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
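/*
 * Example caller from this very file: alternatives_smp_lock() uses a
 * one-byte poke, under text_mutex, to flip a DS prefix into a LOCK
 * prefix:
 *
 *      text_poke(ptr, ((unsigned char []){0xf0}), 1);
 */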
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;
struct text_poke_params {
        struct text_poke_param *params;
        int nparams;
};
static int __kprobes stop_machine_text_poke(void *data)
{
        struct text_poke_params *tpp = data;
        struct text_poke_param *p;
        int i;

        if (atomic_dec_and_test(&stop_machine_first)) {
                for (i = 0; i < tpp->nparams; i++) {
                        p = &tpp->params[i];
                        text_poke(p->addr, p->opcode, p->len);
                }
                smp_wmb();      /* Make sure other cpus see that this has run */
                wrote_text = 1;
        } else {
                while (!wrote_text)
                        cpu_relax();
                smp_mb();       /* Load wrote_text before following execution */
        }

        for (i = 0; i < tpp->nparams; i++) {
                p = &tpp->params[i];
                flush_icache_range((unsigned long)p->addr,
                                   (unsigned long)p->addr + p->len);
        }
        /*
         * Intel Architecture Software Developer's Manual section 7.1.3
         * specifies that a core serializing instruction such as "cpuid"
         * should be executed on _each_ core before the new instruction is
         * made visible.
         */
        sync_core();
        return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify a multi-byte instruction by using stop_machine() on SMP. This allows
 * the user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE handlers.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
        struct text_poke_params tpp;
        struct text_poke_param p;

        p.addr = addr;
        p.opcode = opcode;
        p.len = len;
        tpp.params = &p;
        tpp.nparams = 1;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        /* Use __stop_machine() because the caller already got online_cpus. */
        __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
        return addr;
}
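/*
 * Illustrative usage (kprobes-style, all names hypothetical): swapping
 * in a 5-byte instruction while every other CPU is parked:
 *
 *      get_online_cpus();
 *      mutex_lock(&text_mutex);
 *      text_poke_smp(addr, new_insn, 5);
 *      mutex_unlock(&text_mutex);
 *      put_online_cpus();
 */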
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate text_poke
 * requests and do them in a single stop_machine() invocation if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
        struct text_poke_params tpp = {.params = params, .nparams = n};

        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}
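/*
 * Illustrative batch usage (all values hypothetical): patch two sites
 * while paying the stop_machine() cost only once:
 *
 *      struct text_poke_param ps[2] = {
 *              { .addr = addr1, .opcode = buf1, .len = 5 },
 *              { .addr = addr2, .opcode = buf2, .len = 2 },
 *      };
 *
 *      text_poke_smp_batch(ps, 2);
 */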