#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>		/* vmalloc_to_page(), virt_to_page() used below */
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#define MAX_PATCH_LEN (255-1)
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
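
/*
 * Example output (illustrative address and bytes, not from the original
 * source): with "debug-alternative" on the kernel command line,
 *
 *	DUMP_BYTES(insnbuf, 5, "%p: final_insn: ", instr);
 *
 * prints one line such as
 *
 *	ffffffff81234567: final_insn: 0f 1f 44 00 00
 *
 * i.e. the prefix from fmt followed by the buffer as space-separated hex.
 */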
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
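
/*
 * Worked example (illustrative, not from the original source): padding an
 * 11-byte hole with ASM_NOP_MAX == 8 takes two iterations of the loop above:
 *
 *	add_nops(buf, 11);
 *	  -> memcpy(buf,     ideal_nops[8], 8);    one 8-byte NOP
 *	  -> memcpy(buf + 8, ideal_nops[3], 3);    then one 3-byte NOP
 *
 * i.e. the hole is filled with the fewest, longest NOPs the current
 * ideal_nops table provides rather than a run of single-byte 0x90s.
 */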
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
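
/*
 * Worked example (illustrative numbers, not from the original source): assume
 * the replacement is a 5-byte "jmp" (0xe9) with o_dspl = 0x20 and the
 * replacement text sits 0x100 bytes after the original site.  Then
 *
 *	next_rip = repl_insn + 5
 *	tgt_rip  = next_rip + 0x20
 *	n_dspl   = tgt_rip - orig_insn = 0x100 + 5 + 0x20 = 0x125
 *
 * n_dspl - 2 = 0x123 does not fit in a signed byte, so the 5-byte form is
 * kept and n_dspl - 5 is stored after the 0xe9 opcode.  Had the target been
 * within signed-byte range of the original site, the JMP would have been
 * rewritten as a 2-byte 0xeb JMP with the remaining 3 bytes NOP-padded.
 */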
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d)",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative jump; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
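
/*
 * For reference (a sketch, not part of the original file): the alt_instr
 * entries walked above are emitted by the alternative()/ALTERNATIVE() macros
 * in <asm/alternative.h>.  A call site such as
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * assembles the old instruction into .text, stashes the replacement in
 * .altinstr_replacement and records one struct alt_instr in .altinstructions
 * holding the relative offsets of both, the feature bit and the two lengths,
 * i.e. exactly the instr_offset, repl_offset, cpuid, instrlen and
 * replacementlen fields consumed by the loop above.
 */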
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}
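
/*
 * Usage note (a sketch; see arch/x86/kernel/module.c for the real call): the
 * module loader registers each module's lock-prefix list from
 * module_finalize(), roughly
 *
 *	alternatives_smp_module_add(me, me->name,
 *				    locks->sh_addr,
 *				    locks->sh_addr + locks->sh_size,
 *				    text->sh_addr,
 *				    text->sh_addr + text->sh_size);
 *
 * so that a later alternatives_smp_switch() can flip the module's LOCK
 * prefixes along with the core kernel's.
 */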
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s\n", item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}
bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * An older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because they force JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or MCE
 * handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
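
/*
 * Usage sketch (illustrative; kp is a hypothetical struct kprobe *): kprobes
 * arms a probe by replacing the first opcode byte with INT3 while holding
 * text_mutex; a single-byte store is atomic, so this is safe on a live kernel:
 *
 *	unsigned char int3 = 0xcc;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(kp->addr, &int3, 1);
 *	mutex_unlock(&text_mutex);
 *
 * Anything wider than one naturally aligned, atomically writable unit must
 * instead go through text_poke_smp() below.
 */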
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;
struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};
static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3 specifies
	 * that a core serializing instruction such as "cpuid" should be
	 * executed on _each_ core before the new instruction is made visible.
	 */
	sync_core();
	return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. This allows
 * a user to poke/set multi-byte text on SMP. Only code that never runs from
 * NMI/MCE context may be modified this way, since stop_machine() does _not_
 * protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate text_poke
 * requests and apply them in one go when possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}
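
/*
 * Usage sketch (illustrative; NR_SITES, site[] and new_insn[] are
 * hypothetical names): batching lets a caller such as the optimized-kprobes
 * code pay the stop_machine() cost once for many patch sites:
 *
 *	struct text_poke_param params[NR_SITES];
 *	int i;
 *
 *	for (i = 0; i < NR_SITES; i++) {
 *		params[i].addr   = site[i];
 *		params[i].opcode = new_insn[i];
 *		params[i].len    = 5;	\/\* e.g. a 5-byte relative JMP \*\/
 *	}
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp_batch(params, NR_SITES);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 */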