2 * Code for replacing ftrace calls with jumps.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
8 * Added function graph tracer code, taken from x86, which was written
9 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
13 #include <linux/spinlock.h>
14 #include <linux/hardirq.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ftrace.h>
18 #include <linux/percpu.h>
19 #include <linux/init.h>
20 #include <linux/list.h>
22 #include <asm/cacheflush.h>
23 #include <asm/code-patching.h>
24 #include <asm/ftrace.h>
27 #ifdef CONFIG_DYNAMIC_FTRACE
28 static unsigned int ftrace_nop_replace(void)
34 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
38 addr = ppc_function_entry((void *)addr);
40 /* if (link) set op to 'bl' else 'b' */
41 op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
47 # define _ASM_ALIGN " .align 3 "
48 # define _ASM_PTR " .llong "
50 # define _ASM_ALIGN " .align 2 "
51 # define _ASM_PTR " .long "
55 ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
57 unsigned int replaced;
60 * Note: Due to modules and __init, code can
61 * disappear and change; we need to protect against faulting
62 * as well as code changing. We do this by using the
63 * probe_kernel_* functions.
65 * No real locking needed, this code is run through
66 * kstop_machine, or before SMP starts.
69 /* read the text we want to modify */
70 if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
73 /* Make sure it is what we expect it to be */
77 /* replace the text with the new text */
78 if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
81 flush_icache_range(ip, ip + 8);
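/*
 * Illustrative sketch, not part of the original file: how the helpers
 * above are typically paired.  The call site at "ip" is expected to
 * still hold "bl addr"; if it does, it is overwritten with a nop,
 * otherwise ftrace_modify_code() fails with an error.
 * (example_nop_call_site is a made-up name, kept under #if 0.)
 */
#if 0
static int example_nop_call_site(unsigned long ip, unsigned long addr)
{
	unsigned int old = ftrace_call_replace(ip, addr, 1);	/* expected: bl addr */
	unsigned int new = ftrace_nop_replace();		/* nop */

	return ftrace_modify_code(ip, old, new);
}
#endif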
87 * Helper functions that are the same for both PPC64 and PPC32.
89 static int test_24bit_addr(unsigned long ip, unsigned long addr)
92 /* use create_branch() to verify that the target can be reached with a relative branch */
93 return create_branch((unsigned int *)ip, addr, 0);
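/*
 * Added note: create_branch() returns the encoded branch instruction
 * when the target lies within the +/-32MB reach of a rel24 branch and
 * 0 when it does not, so its return value doubles as the yes/no answer
 * wanted here.
 */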
98 static int is_bl_op(unsigned int op)
100 return (op & 0xfc000003) == 0x48000001;
103 static unsigned long find_bl_target(unsigned long ip, unsigned int op)
107 offset = (op & 0x03fffffc);
109 if (offset & 0x02000000)
110 offset |= 0xfe000000;
112 return ip + (long)offset;
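/*
 * Worked example (added for illustration): the instruction 0x4bfffff1
 * is "bl -16".  is_bl_op() accepts it, since
 * 0x4bfffff1 & 0xfc000003 == 0x48000001, and find_bl_target() extracts
 * the LI field 0x03fffff0, sign extends it to -16 and adds it to the
 * instruction address.
 */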
117 __ftrace_make_nop(struct module *mod,
118 struct dyn_ftrace *rec, unsigned long addr)
123 unsigned long ip = rec->ip;
127 /* read where this goes */
128 if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
131 /* Make sure that this is still a 24-bit branch */
133 printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
137 /* let's find where the branch goes */
138 tramp = find_bl_target(ip, op);
141 * On PPC64 the trampoline looks like:
142 * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high>
143 * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low>
144 * Where the bytes 2, 3, 6 and 7 make up the 32-bit offset
145 * to the TOC that holds the pointer.
147 * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1)
148 * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12)
149 * The actual address is 32 bytes from the offset
151 * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
154 pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
156 /* Find where the trampoline jumps to */
157 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
158 printk(KERN_ERR "Failed to read %lx\n", tramp);
162 pr_devel(" %08x %08x", jmp[0], jmp[1]);
164 /* verify that this is what we expect it to be */
165 if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
166 ((jmp[1] & 0xffff0000) != 0x398c0000) ||
167 (jmp[2] != 0xf8410028) ||
168 (jmp[3] != 0xe96c0020) ||
169 (jmp[4] != 0xe84c0028)) {
170 printk(KERN_ERR "Not a trampoline\n");
174 /* The bottom half is sign extended */
175 offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
176 (int)((short)jmp[1]);
178 pr_devel(" %x ", offset);
180 /* get the address this jumps to */
181 tramp = mod->arch.toc + offset + 32;
182 pr_devel("toc: %lx", tramp);
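/*
 * Worked example (added for illustration, values are hypothetical):
 * for jmp[0] = 0x3d820001 (addis r12,r2,1) and jmp[1] = 0x398c8000
 * (addi r12,r12,-32768), offset = (0x0001 << 16) + (short)0x8000 =
 * 0x8000, and the entry address is read from mod->arch.toc + 0x8000 + 32.
 */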
184 if (probe_kernel_read(jmp, (void *)tramp, 8)) {
185 printk(KERN_ERR "Failed to read %lx\n", tramp);
189 pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
191 ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
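/*
 * Added note: on the big-endian kernels this code targets, the two
 * 32-bit words read above are the high and low halves of the 64-bit
 * entry address, hence the shift-and-add reconstruction.
 */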
193 /* This should match what was called */
194 if (ptr != ppc_function_entry((void *)addr)) {
195 printk(KERN_ERR "addr does not match %lx\n", ptr);
200 * We want to nop the instruction, but the next instruction is
201 * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1)
202 * which needs to be turned into a nop too.
204 if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
207 if (op != 0xe8410028) {
208 printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
213 * Milton Miller pointed out that we cannot blindly use nops here.
214 * If a task was preempted when calling a trace function,
215 * the nops will remove the way to restore the TOC in r2
216 * and the r2 TOC will get corrupted.
221 * bl <tramp> <==== will be replaced with "b +8", branching over the following ld r2,40(r1)
225 op = 0x48000008; /* b +8 */
227 if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
231 flush_icache_range(ip, ip + 8);
238 __ftrace_make_nop(struct module *mod,
239 struct dyn_ftrace *rec, unsigned long addr)
243 unsigned long ip = rec->ip;
246 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
249 /* Make sure that this is still a 24-bit branch */
251 printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
255 /* let's find where the branch goes */
256 tramp = find_bl_target(ip, op);
259 * On PPC32 the trampoline looks like:
260 * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
261 * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
262 * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
263 * 0x4e, 0x80, 0x04, 0x20 bctr
266 pr_devel("ip:%lx jumps to %lx", ip, tramp);
268 /* Find where the trampoline jumps to */
269 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
270 printk(KERN_ERR "Failed to read %lx\n", tramp);
274 pr_devel(" %08x %08x ", jmp[0], jmp[1]);
276 /* verify that this is what we expect it to be */
277 if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
278 ((jmp[1] & 0xffff0000) != 0x396b0000) ||
279 (jmp[2] != 0x7d6903a6) ||
280 (jmp[3] != 0x4e800420)) {
281 printk(KERN_ERR "Not a trampoline\n");
285 tramp = (jmp[1] & 0xffff) |
286 ((jmp[0] & 0xffff) << 16);
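/*
 * Worked example (added for illustration, values are hypothetical):
 * for a trampoline built as lis r11,0xc012 (0x3d60c012) followed by
 * addi r11,r11,0x3456 (0x396b3456), the code above reconstructs
 * tramp = 0xc0123456.
 */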
290 pr_devel(" %lx ", tramp);
294 "Trampoline location %08lx does not match addr\n",
301 if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
304 flush_icache_range(ip, ip + 8);
309 #endif /* CONFIG_MODULES */
311 int ftrace_make_nop(struct module *mod,
312 struct dyn_ftrace *rec, unsigned long addr)
314 unsigned long ip = rec->ip;
315 unsigned int old, new;
318 * If the calling address is more than 24 bits away,
319 * then we had to use a trampoline to make the call.
320 * Otherwise just update the call site.
322 if (test_24bit_addr(ip, addr)) {
324 old = ftrace_call_replace(ip, addr, 1);
325 new = ftrace_nop_replace();
326 return ftrace_modify_code(ip, old, new);
329 #ifdef CONFIG_MODULES
331 * Out of range branches can only come from modules.
332 * We should either already have a pointer to the module
333 * or it has been passed in.
335 if (!rec->arch.mod) {
337 printk(KERN_ERR "No module loaded addr=%lx\n",
343 if (mod != rec->arch.mod) {
345 "Record mod %p not equal to passed in mod %p\n",
349 /* nothing to do if mod == rec->arch.mod */
353 return __ftrace_make_nop(mod, rec, addr);
355 /* We should not get here without modules */
357 #endif /* CONFIG_MODULES */
360 #ifdef CONFIG_MODULES
363 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
366 unsigned long ip = rec->ip;
368 /* read where this goes */
369 if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
373 * It should be pointing to two nops or "b +8; ld r2,40(r1)"
376 if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
377 ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
378 printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
382 /* If we never set up a trampoline to ftrace_caller, then bail */
383 if (!rec->arch.mod->arch.tramp) {
384 printk(KERN_ERR "No ftrace trampoline\n");
388 /* create the branch to the trampoline */
389 op[0] = create_branch((unsigned int *)ip,
390 rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
392 printk(KERN_ERR "REL24 out of range!\n");
399 pr_devel("write to %lx\n", rec->ip);
401 if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
404 flush_icache_range(ip, ip + 8);
410 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
413 unsigned long ip = rec->ip;
415 /* read where this goes */
416 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
419 /* It should be pointing to a nop */
420 if (op != PPC_INST_NOP) {
421 printk(KERN_ERR "Expected NOP but have %x\n", op);
425 /* If we never set up a trampoline to ftrace_caller, then bail */
426 if (!rec->arch.mod->arch.tramp) {
427 printk(KERN_ERR "No ftrace trampoline\n");
431 /* create the branch to the trampoline */
432 op = create_branch((unsigned int *)ip,
433 rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
435 printk(KERN_ERR "REL24 out of range!\n");
439 pr_devel("write to %lx\n", rec->ip);
441 if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
444 flush_icache_range(ip, ip + 8);
448 #endif /* CONFIG_PPC64 */
449 #endif /* CONFIG_MODULES */
451 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
453 unsigned long ip = rec->ip;
454 unsigned int old, new;
457 * If the calling address is more than 24 bits away,
458 * then we had to use a trampoline to make the call.
459 * Otherwise just update the call site.
461 if (test_24bit_addr(ip, addr)) {
463 old = ftrace_nop_replace();
464 new = ftrace_call_replace(ip, addr, 1);
465 return ftrace_modify_code(ip, old, new);
468 #ifdef CONFIG_MODULES
470 * Out of range branches can only come from modules.
471 * Since we are converting from a nop, the record had better
472 * already have a module defined.
474 if (!rec->arch.mod) {
475 printk(KERN_ERR "No module loaded\n");
479 return __ftrace_make_call(rec, addr);
481 /* We should not get here without modules */
483 #endif /* CONFIG_MODULES */
486 int ftrace_update_ftrace_func(ftrace_func_t func)
488 unsigned long ip = (unsigned long)(&ftrace_call);
489 unsigned int old, new;
492 old = *(unsigned int *)&ftrace_call;
493 new = ftrace_call_replace(ip, (unsigned long)func, 1);
494 ret = ftrace_modify_code(ip, old, new);
499 int __init ftrace_dyn_arch_init(void *data)
501 /* caller expects data to be zero */
502 unsigned long *p = data;
508 #endif /* CONFIG_DYNAMIC_FTRACE */
510 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
512 #ifdef CONFIG_DYNAMIC_FTRACE
513 extern void ftrace_graph_call(void);
514 extern void ftrace_graph_stub(void);
516 int ftrace_enable_ftrace_graph_caller(void)
518 unsigned long ip = (unsigned long)(&ftrace_graph_call);
519 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
520 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
521 unsigned int old, new;
523 old = ftrace_call_replace(ip, stub, 0);
524 new = ftrace_call_replace(ip, addr, 0);
526 return ftrace_modify_code(ip, old, new);
529 int ftrace_disable_ftrace_graph_caller(void)
531 unsigned long ip = (unsigned long)(&ftrace_graph_call);
532 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
533 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
534 unsigned int old, new;
536 old = ftrace_call_replace(ip, addr, 0);
537 new = ftrace_call_replace(ip, stub, 0);
539 return ftrace_modify_code(ip, old, new);
541 #endif /* CONFIG_DYNAMIC_FTRACE */
544 extern void mod_return_to_handler(void);
548 * Hook the return address and push it onto the stack of return addrs
549 * in the current thread info.
551 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
555 struct ftrace_graph_ent trace;
556 unsigned long return_hooker = (unsigned long)&return_to_handler;
558 if (unlikely(atomic_read(&current->tracing_graph_pause)))
562 /* non core kernel code needs to save and restore the TOC */
563 if (REGION_ID(self_addr) != KERNEL_REGION_ID)
564 return_hooker = (unsigned long)&mod_return_to_handler;
567 return_hooker = ppc_function_entry((void *)return_hooker);
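/*
 * Added note: on PPC64, &return_to_handler / &mod_return_to_handler
 * point at function descriptors, so ppc_function_entry() is used to
 * fetch the real code address before it is written as the fake return
 * address.  On PPC32 it returns the address unchanged.
 */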
570 * Protect against a fault, even if it shouldn't
571 * happen. This tool is too intrusive to
572 * ignore such a protection.
575 "1: " PPC_LL "%[old], 0(%[parent])\n"
576 "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
577 " li %[faulted], 0\n"
580 ".section .fixup, \"ax\"\n"
581 "4: li %[faulted], 1\n"
585 ".section __ex_table,\"a\"\n"
591 : [old] "=&r" (old), [faulted] "=r" (faulted)
592 : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
596 if (unlikely(faulted)) {
602 if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
607 trace.func = self_addr;
609 /* Only trace if the calling function expects to */
610 if (!ftrace_graph_entry(&trace)) {
611 current->curr_ret_stack--;
615 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */