/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
	.section	".text"

	.globl system_call_common
system_call_common:
	addi	r1,r1,-INT_FRAME_SIZE
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call. There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
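	/*
	 * A minimal illustration of the userspace side of this convention
	 * (a sketch, not code from this file, assuming the usual libc
	 * sequence): after the "sc" instruction, CR0.SO distinguishes
	 * success from failure:
	 *
	 *	sc			# system call
	 *	bnslr+			# CR0.SO clear: success, r3 = result
	 *	neg	r3,r3		# CR0.SO set: r3 holds an errno value
	 */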
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
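	/*
	 * Background on the check above: the dispatch trace log (DTL) is a
	 * buffer the hypervisor appends to when this shared-processor
	 * partition is preempted or dispatched. Comparing our saved read
	 * index against the log's write index shows whether any "stolen"
	 * CPU time still has to be accumulated before this syscall entry
	 * is accounted.
	 */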
	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that condition
	 * is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */
	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	addi	r9,r1,STACK_FRAME_OVERHEAD

	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */
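	/*
	 * Layout note (a reading aid, assuming the interleaved table this
	 * dispatch indexes): each syscall number owns a 16-byte slot in
	 * .sys_call_table, with the native 64-bit handler at offset 0 and
	 * the 32-bit entry at offset 8, which is why the code above scales
	 * the syscall number by 16 and adds 8 for _TIF_32BIT tasks.
	 */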
syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT
	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	rfid
	b	.	/* prevent speculative execution */
syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont
syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f
	/* Clear per-syscall TIF flags if any are set. */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12	/* fetch flags word with reservation */
	andc	r10,r10,r11	/* clear the per-syscall bits */
	stdcx.	r10,0,r12	/* store back if reservation held */
	bne-	3b		/* lost the reservation, retry */
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite
	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1	/* low bit set: nvgprs not yet saved */
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear the low bit to record the save */
	std	r0,_TRAP(r1)
	blr
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
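/*
 * For orientation only (a sketch, not code from this file): the C-level
 * caller in process.c uses this routine roughly as
 *
 *	last = _switch(old_thread, new_thread);
 *
 * where both arguments are &task->thread and the return value identifies
 * the previously-running task, matching the description above.
 */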
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_MMU_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
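	/*
	 * Sizing note for the shifts above: clearing the low 28 bits of
	 * an address yields its 256MB-segment ESID (2^28 = 256MB), while
	 * clearing the low 40 bits yields the ESID of a 1TB segment
	 * (2^40 = 1TB), hence the two feature-dependent paths.
	 */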
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_MMU_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
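	/*
	 * Ordering note: the ESID slot is zeroed before the VSID is
	 * written, and only then set to its final value, so the shadow
	 * entry is never valid while half-updated; this is what lets the
	 * update above run without write barriers.
	 */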
	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
BEGIN_FTR_SECTION
	ld	r0,THREAD_DSCR(r4)
	cmpd	r0,r25
	beq	1f
	mtspr	SPRN_DSCR,r0
1:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
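	/*
	 * Background for the exit paths below: this kernel masks
	 * interrupts "lazily". PACASOFTIRQEN records the soft-enable
	 * state the kernel asked for, while an interrupt arriving during
	 * a soft-disabled section is not handled immediately but recorded
	 * in PACAIRQHAPPENED, to be replayed once interrupts are
	 * soft-enabled again.
	 */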
#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif /* !CONFIG_PREEMPT */
	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path, we first check if we
	 * have to change our interrupt state.
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpw	cr0,r5,r6
	beq	do_restore

	/* We do, handle disable first, which is easy */
	cmpwi	cr0,r5,0
	bne	1f
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	b	do_restore
1:
	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay
	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi.
	 */
restore_no_replay:
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);
	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13)	/* Get kernel MSR without EE */
	andc	r4,r4,r0		/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace; the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3E */
	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	.__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay
	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it.
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60	/* keep only the low nibble */
	or	r4,r4,r3	/* merge in the new trap vector */
	std	r4,_TRAP(r1)
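	/*
	 * Why the low nibble matters: the bottom bit of the saved trap
	 * value is the "non-volatile GPRs not yet saved" flag that
	 * save_nvgprs and ret_from_except test, so it has to survive the
	 * trap number being rewritten here.
	 */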
	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.do_IRQ
	b	.ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.timer_interrupt
	b	.ret_from_except
#ifdef CONFIG_PPC_BOOK3E
1:	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.doorbell_exception
	b	.ret_from_except
#endif /* CONFIG_PPC_BOOK3E */
1:	b	.ret_from_except /* What else to do here ? */
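	/*
	 * Vector numbers used above, for reference: 0x500 is the external
	 * interrupt vector, 0x900 the decrementer, and 0x280 the Book3E
	 * doorbell; __check_irq_replay returns one of these (or 0) to
	 * indicate which pending source should be replayed.
	 */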
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq
	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */
	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite
1:	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except_lite

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
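/*
 * For orientation (a sketch of the caller, not code in this file):
 * rtas.c invokes this as enter_rtas(args) with r3 holding the physical
 * address of the rtas_args block, which is why r3 must be preserved all
 * the way to the RTAS entry point below.
 */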
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */
	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0
#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)
	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */
	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2		/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)	/* get &.rtas_restore_regs */
	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
1:	.llong	.rtas_restore_regs
_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */
	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */
#endif /* CONFIG_PPC_RTAS */
_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	/* Get the PROM entrypoint */
	mtlr	r4
	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	mfmsr	r11
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync
	/* Enter PROM here... */
	blrl
	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32
	/* Restore the MSR (back to 64 bits) */
	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
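	/*
	 * How this works (a summary, assuming the usual dynamic-ftrace
	 * flow): every traced function's prologue branches here, and the
	 * "bl ftrace_stub" at the global ftrace_call: site is rewritten
	 * at runtime to call the active tracer, or left pointing at the
	 * stub while tracing is off.
	 */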
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else /* CONFIG_DYNAMIC_FTRACE */
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)	/* function descriptor: load entry point */
	mtctr	r5
	bctrl
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr
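	/*
	 * Mechanism note: prepare_ftrace_return() is what redirects the
	 * parent's saved LR to return_to_handler below, so that when the
	 * traced function returns, control passes through the handler,
	 * which looks up and restores the real return address.
	 */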
_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */