/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: the architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
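 *
 * For illustration (an informal sketch, not part of the original comment),
 * the hardware part of that "top of stack" frame, highest address first, is:
 *	SS, RSP, RFLAGS, CS, RIP (plus an error code for some exceptions).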
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
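/*
 * (Informal note, not in the original: bit 9 of RFLAGS is IF, the
 * interrupt-enable flag, so the bt above asks whether the iretq using
 * this frame is about to re-enable interrupts.)
 */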
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm
	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
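/*
 * (Informal note: SYSCALL/SYSRET carry the user RIP in %rcx and the user
 * RFLAGS in %r11, which is why the eflags value lives in the R11 slot of
 * the partial frame that these two macros patch up.)
 */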
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq %rax /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm
	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
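/*
 * (Informal note: FAKE_STACK_FRAME/UNFAKE_STACK_FRAME bracket in-kernel
 * entries such as kernel_thread and kernel_execve below, which must build
 * a syscall-style frame by hand because they were not entered via SYSCALL.)
 */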
	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 4
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -4
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax	system call number
 * rdi	arg0
 * rcx	return address for syscall/sysret, C arg3
 * rsi	arg1
 * rdx	arg2
 * r10	arg3	(--> moved to rcx for C)
 * r8	arg4
 * r9	arg5
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
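/*
 * For illustration (informal, not from the original comment): a user-space
 * write(fd, buf, count) arrives here as
 *	rax = __NR_write, rdi = fd, rsi = buf, rdx = count,
 * with the user RIP in rcx and the user RFLAGS in r11.
 */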
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * from the beginning:
	 */
	sti
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
	.globl ret_from_sys_call
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
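	/*
	 * (Informal note: bt copies bit TIF_NEED_RESCHED of %edx into CF,
	 * so the jnc below takes the no-reschedule path when the bit is
	 * clear.)
	 */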
	jnc  sysret_signal
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check
	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	sti
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    1f

	/* Really a signal */
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	cli
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(system_call)
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET	r8,R8-ARGOFFSET
	CFI_REL_OFFSET	r9,R9-ARGOFFSET
	CFI_REL_OFFSET	r10,R10-ARGOFFSET
	CFI_REL_OFFSET	r11,R11-ARGOFFSET
	cli
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,threadinfo_status(%rcx)
	jmp  retint_swapgs
	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	cli
	TRACE_IRQS_OFF
	jmp int_with_check
	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	sti
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest
int_signal:
	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(int_ret_from_sys_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
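/*
 * (Informal note: the return address is popped into %r11 and parked in
 * callee-saved %r15 across the indirect call, since %r11 is clobbered by
 * the called C code; it is pushed back just before the ret.)
 */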
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm
/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
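/*
 * (Informal note: for exceptions with an error code the CPU pushes that
 * code on top of the RIP/CS/RFLAGS/RSP/SS frame; the entry code reuses
 * the slot as orig_rax, hence ORIG_RAX as the reference point.)
 */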
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	swapgs
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
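	/*
	 * (Informal note: pda_irqcount starts at -1, so the incl above sets
	 * ZF only for the outermost interrupt; cmoveq therefore switches to
	 * the per-CPU interrupt stack only in that case.)
	 */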
	push    %rbp			# backlink for old unwinder
	CFI_ADJUST_CFA_OFFSET 8
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	cli
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  retint_careful

retint_swapgs:
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
	swapgs
	jmp restore_args

retint_restore_args:
	cli
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
iret_label:
	iretq
	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
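	/*
	 * (Informal note: __ex_table pairs a potentially faulting
	 * instruction with its fixup; if the iretq above faults, the
	 * page-fault code resumes execution at bad_iret.)
	 */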
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	sti
	jmp do_exit
	.previous
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	sti
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check
#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm
ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)
	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
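	/*
	 * (Informal note: for \ist != 0, the IST slot in the per-CPU TSS is
	 * moved down by EXCEPTION_STKSZ around the handler call so that a
	 * recursive exception on the same vector gets a fresh stack.)
	 */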
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	cli
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
774 * "Paranoid" exit path from exception stack.
775 * Paranoid because this is used by NMIs and cannot take
776 * any kernel state for granted.
777 * We don't do kernel preemption checks here, because only
778 * NMI should be common and it does not enable IRQs and
779 * cannot get reschedule ticks.
781 * "trace" is 0 for the NMI handler only, because irq-tracing
782 * is fundamentally NMI-unsafe. (we cannot change the soft and
783 * hard flags at once, atomically)
	.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	call schedule
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	/*
	 * The iret might restore flags:
	 */
	TRACE_IRQS_IRETQ
	swapgs
	RESTORE_ARGS 0,8,0
	jmp iret_label
	CFI_ENDPROC
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	cli
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)
	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs			/* switch back to user gs */
	movl %ebx,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
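/*
 * For illustration (a hypothetical caller, not from this file):
 *	kernel_thread(my_fn, NULL, CLONE_FS | CLONE_FILES);
 * starts my_fn(NULL) in a new kernel thread; the child enters at
 * child_rip below and exits via do_exit() when my_fn returns.
 */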
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)

	/* terminate stack in child */
	xorl %edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning, this avoids the need
	 * of hacks for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)
ENTRY(child_rip)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	xorl %edi, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)
	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)
ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)
	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)
	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	CFI_ADJUST_CFA_OFFSET 8
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)
#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
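	/*
	 * (Informal note: fills the pt_regs-style frame at %rdi with the
	 * current kernel context, switches to the stack passed in %rsi,
	 * and tail-jumps to the callback in %rdx; caller-saved slots are
	 * zeroed through %rax.)
	 */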
	movq	%r15, R15(%rdi)
	movq	%r14, R14(%rdi)
	xchgq	%rsi, %rsp
	movq	%r13, R13(%rdi)
	movq	%r12, R12(%rdi)
	xorl	%eax, %eax
	movq	%rbp, RBP(%rdi)
	movq	%rbx, RBX(%rdi)
	movq	(%rsp), %rcx
	movq	%rax, R11(%rdi)
	movq	%rax, R10(%rdi)
	movq	%rax, R9(%rdi)
	movq	%rax, R8(%rdi)
	movq	%rax, RAX(%rdi)
	movq	%rax, RCX(%rdi)
	movq	%rax, RDX(%rdi)
	movq	%rax, RSI(%rdi)
	movq	%rax, RDI(%rdi)
	movq	%rax, ORIG_RAX(%rdi)
	movq	%rcx, RIP(%rdi)
	leaq	8(%rsp), %rcx
	movq	$__KERNEL_CS, CS(%rdi)
	movq	%rax, EFLAGS(%rdi)
	movq	%rcx, RSP(%rdi)
	movq	$__KERNEL_DS, SS(%rdi)
	jmpq	*%rdx
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif