#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
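/*
 * Worked example of the layout described above (illustrative): the nops
 * are packed back to back, so the n-byte nop starts at the sum of the
 * sizes of all shorter nops:
 *
 *	intel_nops[3] == intelnops + 1 + 2		(3-byte nop)
 *	intel_nops[5] == intelnops + 1 + 2 + 3 + 4	(5-byte nop)
 *
 * and the extra slot past ASM_NOP_MAX is the 5-byte atomic nop appended
 * at offset 1 + 2 + ... + 8.
 */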
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
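/*
 * A minimal usage sketch (illustrative only): pad the tail of a patch
 * buffer whose replacement is shorter than the original instruction,
 * which is exactly what apply_alternatives() arranges below:
 *
 *	u8 buf[7];
 *	memcpy(buf, replacement, 5);	// 5-byte replacement instruction
 *	add_nops(buf + 5, 2);		// fill the last 2 bytes with a nop
 */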
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d)",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen);

		memcpy(insnbuf, replacement, a->replacementlen);

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x", *(s32 *)(insnbuf + 1));
		}

		if (a->instrlen > a->replacementlen)
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);

		text_poke_early(instr, insnbuf, a->instrlen);
	}
}
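/*
 * An alternative site is emitted by the alternative() macro family from
 * <asm/alternative.h>. As a sketch (one existing 32-bit user; consult the
 * header for the authoritative form):
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 *
 * The old instruction goes into .text and a struct alt_instr describing
 * both sequences goes into .altinstructions, which is what the loop above
 * consumes.
 */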
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
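/*
 * Byte-level view (illustrative): LOCK_PREFIX assembles to the 0xf0 byte
 * and records that byte's address in the .smp_locks section. The two modes
 * therefore differ in exactly one byte per site, e.g. for a locked cmpxchg:
 *
 *	f0 0f b1 16	lock cmpxchg %edx,(%rsi)	(SMP mode)
 *	3e 0f b1 16	ds cmpxchg %edx,(%rsi)		(UP mode: the DS
 *							 override is a no-op)
 */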
struct smp_alt_module {
	/* owning module, NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s\n", item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}
bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * An older binutils section-handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
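/*
 * Sketch of the paravirt case (illustrative): a pv call site is compiled
 * as an indirect call through the pv_ops tables, e.g.
 *
 *	call *pv_irq_ops.irq_disable
 *
 * When running on bare metal, pv_init_ops.patch() can rewrite the site to
 * the native "cli" instruction; whatever part of the slot the patcher did
 * not use is nop-padded by the loop above.
 */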
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}

	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
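/*
 * Typical caller (a sketch modeled on kprobes): arming a probe replaces
 * the first opcode byte with int3 (0xcc), a single-byte and therefore
 * atomic write:
 *
 *	unsigned char int3 = 0xcc;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(probe_addr, &int3, 1);
 *	mutex_unlock(&text_mutex);
 *
 * (probe_addr is a stand-in for the probed instruction's address.)
 */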
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from the immediate values work.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
 * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavyweight operation, it is better to aggregate
 * text_poke requests and apply them in a single stop_machine() if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}
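/*
 * Batching sketch (illustrative; the names are stand-ins): optimized
 * kprobes collects one text_poke_param per 5-byte jump it wants to
 * install and pays for stop_machine() only once:
 *
 *	struct text_poke_param params[NR_PROBES];
 *	int i;
 *
 *	for (i = 0; i < n; i++) {
 *		params[i].addr = jump_addrs[i];
 *		params[i].opcode = jump_insns[i];
 *		params[i].len = 5;
 *	}
 *	text_poke_smp_batch(params, n);
 */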