2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et.al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/config.h>
32 #include <linux/linkage.h>
33 #include <asm/segment.h>
35 #include <asm/cache.h>
36 #include <asm/errno.h>
37 #include <asm/dwarf2.h>
38 #include <asm/calling.h>
39 #include <asm/asm-offsets.h>
41 #include <asm/unistd.h>
42 #include <asm/thread_info.h>
43 #include <asm/hw_irq.h>
47 #ifndef CONFIG_PREEMPT
48 #define retint_kernel retint_restore_args
52 * C code is not supposed to know about undefined top of stack. Every time
53 * a C function with a pt_regs argument is called from the SYSCALL based
54 * fast path FIXUP_TOP_OF_STACK is needed.
55 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
59 /* %rsp:at FRAMEEND */
/*
 * Stack-frame helper macros for the 64-bit SYSCALL fast path.
 * NOTE(review): this listing is an incomplete excerpt -- .endm terminators
 * and some intervening instructions are missing from view.
 */
60 .macro FIXUP_TOP_OF_STACK tmp
/* Reconstruct the architectural (iret-style) top of stack that SYSCALL did
   not save, so C code that takes a pt_regs pointer sees valid SS/CS/EFLAGS.
   The user %rsp was stashed in %gs:pda_oldrsp at syscall entry. */
61 movq %gs:pda_oldrsp,\tmp
63 movq $__USER_DS,SS(%rsp)
64 movq $__USER_CS,CS(%rsp)
66 movq R11(%rsp),\tmp /* get eflags */
/* SYSCALL left user EFLAGS in %r11 (saved in the R11 slot); propagate it
   into the EFLAGS slot of the frame. */
67 movq \tmp,EFLAGS(%rsp)
/* Inverse of FIXUP_TOP_OF_STACK: resync the SYSRET state (pda_oldrsp and
   the R11/eflags slot) after C code may have modified pt_regs. */
70 .macro RESTORE_TOP_OF_STACK tmp,offset=0
71 movq RSP-\offset(%rsp),\tmp
72 movq \tmp,%gs:pda_oldrsp
73 movq EFLAGS-\offset(%rsp),\tmp
74 movq \tmp,R11-\offset(%rsp)
/* Build a fake interrupt frame (ss, rsp, eflags, cs, rip) plus an orig-rax
   slot; \child_rip becomes the RIP the new context starts at. */
77 .macro FAKE_STACK_FRAME child_rip
78 /* push in order ss, rsp, eflags, cs, rip */
81 CFI_ADJUST_CFA_OFFSET 8
82 /*CFI_REL_OFFSET ss,0*/
84 CFI_ADJUST_CFA_OFFSET 8
/* Bit 9 of EFLAGS is IF -- the new context starts with interrupts enabled. */
86 pushq $(1<<9) /* eflags - interrupts on */
87 CFI_ADJUST_CFA_OFFSET 8
88 /*CFI_REL_OFFSET rflags,0*/
89 pushq $__KERNEL_CS /* cs */
90 CFI_ADJUST_CFA_OFFSET 8
91 /*CFI_REL_OFFSET cs,0*/
92 pushq \child_rip /* rip */
93 CFI_ADJUST_CFA_OFFSET 8
95 pushq %rax /* orig rax */
96 CFI_ADJUST_CFA_OFFSET 8
/* Drop the six quadwords pushed by FAKE_STACK_FRAME. */
99 .macro UNFAKE_STACK_FRAME
101 CFI_ADJUST_CFA_OFFSET -(6*8)
/* Default DWARF CFI annotations for a full pt_regs frame: CFA at SS+8 and
   each saved register described at its pt_regs slot offset.
   NOTE(review): excerpt is incomplete (.endm and some lines missing). */
104 .macro CFI_DEFAULT_STACK start=1
109 CFI_DEF_CFA_OFFSET SS+8
111 CFI_REL_OFFSET r15,R15
112 CFI_REL_OFFSET r14,R14
113 CFI_REL_OFFSET r13,R13
114 CFI_REL_OFFSET r12,R12
115 CFI_REL_OFFSET rbp,RBP
116 CFI_REL_OFFSET rbx,RBX
117 CFI_REL_OFFSET r11,R11
118 CFI_REL_OFFSET r10,R10
121 CFI_REL_OFFSET rax,RAX
122 CFI_REL_OFFSET rcx,RCX
123 CFI_REL_OFFSET rdx,RDX
124 CFI_REL_OFFSET rsi,RSI
125 CFI_REL_OFFSET rdi,RDI
126 CFI_REL_OFFSET rip,RIP
127 /*CFI_REL_OFFSET cs,CS*/
128 /*CFI_REL_OFFSET rflags,EFLAGS*/
129 CFI_REL_OFFSET rsp,RSP
130 /*CFI_REL_OFFSET ss,SS*/
133 * A newly forked process directly context switches into this.
/* ret_from_fork path (fragment): pick how the child returns to user mode.
   Kernel threads and IA32 tasks must take the IRET path; 64-bit user tasks
   take the SYSRET fast path. Several branch targets are not visible here. */
139 GET_THREAD_INFO(%rcx)
/* Syscall tracing/audit pending? (the taken-branch target is missing from
   this excerpt -- presumably the trace-leave code below). */
140 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
/* CPL bits of the saved CS are 0 for a kernel thread -> must use IRET. */
144 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
145 je int_ret_from_sys_call
/* 32-bit (IA32 emulation) tasks cannot return via SYSRET either. */
146 testl $_TIF_IA32,threadinfo_flags(%rcx)
147 jnz int_ret_from_sys_call
148 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
149 jmp ret_from_sys_call
/* Trace-leave path (fragment). */
152 call syscall_trace_leave
153 GET_THREAD_INFO(%rcx)
/* system_call fast path (fragment). The register convention is spelled out
   in the original comment lines kept below; entry is via the SYSCALL
   instruction with interrupts off. NOTE(review): several lines (the ENTRY
   label, swapgs, SAVE_ARGS, sysret, branch targets) are missing from this
   excerpt. */
158 * System call entry. Upto 6 arguments in registers are supported.
160 * SYSCALL does not save anything on the stack and does not change the
166 * rax system call number
168 * rcx return address for syscall/sysret, C arg3
171 * r10 arg3 (--> moved to rcx for C)
174 * r11 eflags for syscall/sysret, temporary for C
175 * r12-r15,rbp,rbx saved by C code, not touched.
177 * Interrupts are off on entry.
178 * Only called from user space.
180 * XXX if we had a free scratch register we could save the RSP into the stack frame
181 * and report it properly in ps. Unfortunately we haven't.
188 /*CFI_REGISTER rflags,r11*/
/* Stash the user stack pointer and switch to this CPU's kernel stack. */
190 movq %rsp,%gs:pda_oldrsp
191 movq %gs:pda_kernelstack,%rsp
/* Record the syscall number and the user return address (SYSCALL put it
   in %rcx) into the partial frame. */
194 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
195 movq %rcx,RIP-ARGOFFSET(%rsp)
196 CFI_REL_OFFSET rip,RIP-ARGOFFSET
197 GET_THREAD_INFO(%rcx)
/* Any trace/audit/seccomp work forces the slow (tracesys) path. */
198 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
/* Range-check the syscall number before indexing sys_call_table. */
201 cmpq $__NR_syscall_max,%rax
204 call *sys_call_table(,%rax,8) # XXX: rip relative
205 movq %rax,RAX-ARGOFFSET(%rsp)
/* Fast return: restore user RIP into %rcx (SYSRET takes it from there),
   restore args and the user stack pointer, then SYSRET (not visible). */
207 * Syscall return path ending with SYSRET (fast path)
208 * Has incomplete stack frame and undefined top of stack.
210 .globl ret_from_sys_call
212 movl $_TIF_ALLWORK_MASK,%edi
215 GET_THREAD_INFO(%rcx)
217 movl threadinfo_flags(%rcx),%edx
221 movq RIP-ARGOFFSET(%rsp),%rcx
223 RESTORE_ARGS 0,-ARG_SKIP,1
224 /*CFI_REGISTER rflags,r11*/
225 movq %gs:pda_oldrsp,%rsp
229 /* Handle reschedules */
230 /* edx: work, edi: workmask */
233 bt $TIF_NEED_RESCHED,%edx
237 CFI_ADJUST_CFA_OFFSET 8
240 CFI_ADJUST_CFA_OFFSET -8
243 /* Handle a signal */
246 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
249 /* Really a signal */
250 /* edx: work flags (arg3) */
/* do_notify_resume needs a full pt_regs frame, so dispatch it through
   ptregscall_common with the handler address in %rax. */
251 leaq do_notify_resume(%rip),%rax
252 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
253 xorl %esi,%esi # oldset -> arg2
254 call ptregscall_common
255 1: movl $_TIF_NEED_RESCHED,%edi
/* Bad syscall number: report -ENOSYS through the normal return path. */
259 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
260 jmp ret_from_sys_call
262 /* Do syscall tracing */
/* Slow path: build a full frame, give the tracer a chance to inspect and
   modify registers, re-dispatch, and return via ret_from_sys_call. */
266 movq $-ENOSYS,RAX(%rsp)
267 FIXUP_TOP_OF_STACK %rdi
269 call syscall_trace_enter
270 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
272 cmpq $__NR_syscall_max,%rax
274 movq %r10,%rcx /* fixup for C */
275 call *sys_call_table(,%rax,8)
276 movq %rax,RAX-ARGOFFSET(%rsp)
279 call syscall_trace_leave
280 RESTORE_TOP_OF_STACK %rbx
282 jmp ret_from_sys_call
/* IRET-based syscall return path (fragment): used whenever SYSRET is not
   safe (signals, tracing, IA32 tasks, kernel threads). Declares CFI for
   the partial frame, then loops over pending work flags.
   NOTE(review): several labels and branch targets are missing from this
   excerpt. */
286 * Syscall return path ending with IRET.
287 * Has correct top of stack, but partial stack frame.
289 ENTRY(int_ret_from_sys_call)
291 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
292 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
293 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
294 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
295 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
296 CFI_REL_OFFSET rip,RIP-ARGOFFSET
297 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
298 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
299 CFI_REL_OFFSET rax,RAX-ARGOFFSET
300 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
301 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
302 CFI_REL_OFFSET r8,R8-ARGOFFSET
303 CFI_REL_OFFSET r9,R9-ARGOFFSET
304 CFI_REL_OFFSET r10,R10-ARGOFFSET
305 CFI_REL_OFFSET r11,R11-ARGOFFSET
/* Returning to kernel space (CPL bits of saved CS are 0)? Then skip the
   user-mode work checks entirely. */
307 testl $3,CS-ARGOFFSET(%rsp)
308 je retint_restore_args
309 movl $_TIF_ALLWORK_MASK,%edi
310 /* edi: mask to check */
312 GET_THREAD_INFO(%rcx)
313 movl threadinfo_flags(%rcx),%edx
/* Leaving a (possibly compat) syscall: clear the TS_COMPAT marker. */
316 andl $~TS_COMPAT,threadinfo_status(%rcx)
319 /* Either reschedule or signal or syscall exit tracking needed. */
320 /* First do a reschedule test. */
321 /* edx: work, edi: workmask */
323 bt $TIF_NEED_RESCHED,%edx
327 CFI_ADJUST_CFA_OFFSET 8
330 CFI_ADJUST_CFA_OFFSET -8
334 /* handle signals and tracing -- both require a full stack frame */
338 /* Check for syscall exit trace */
339 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
342 CFI_ADJUST_CFA_OFFSET 8
343 leaq 8(%rsp),%rdi # &ptregs -> arg1
344 call syscall_trace_leave
346 CFI_ADJUST_CFA_OFFSET -8
/* Mask the trace bits out of the work mask so the loop cannot re-enter
   the trace-leave path for the same event. */
347 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
352 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
354 movq %rsp,%rdi # &ptregs -> arg1
355 xorl %esi,%esi # oldset -> arg2
356 call do_notify_resume
357 1: movl $_TIF_NEED_RESCHED,%edi
/* PTREGSCALL: generate a stub for syscalls that need a full pt_regs; the
   stub passes a pointer to the register frame as the extra argument \arg
   and funnels into ptregscall_common with the C handler in %rax.
   NOTE(review): labels, .endm lines and some instructions are missing
   from this excerpt. */
365 * Certain special system calls that need to save a complete full stack frame.
368 .macro PTREGSCALL label,func,arg
371 leaq \func(%rip),%rax
372 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
373 jmp ptregscall_common
378 PTREGSCALL stub_clone, sys_clone, %r8
379 PTREGSCALL stub_fork, sys_fork, %rdi
380 PTREGSCALL stub_vfork, sys_vfork, %rdi
381 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
382 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
383 PTREGSCALL stub_iopl, sys_iopl, %rsi
/* Common tail for the stubs above: complete the frame, fix the top of
   stack, call the handler held in %rax, then undo the fixup and return. */
385 ENTRY(ptregscall_common)
387 CFI_ADJUST_CFA_OFFSET -8
388 CFI_REGISTER rip, r11
391 CFI_REGISTER rip, r15
392 FIXUP_TOP_OF_STACK %r11
394 RESTORE_TOP_OF_STACK %r11
396 CFI_REGISTER rip, r11
399 CFI_ADJUST_CFA_OFFSET 8
400 CFI_REL_OFFSET rip, 0
/* stub_execve (fragment -- its ENTRY line is not visible here): similar
   shape, but checks TIF_IA32 and exits via the IRET return path. */
407 CFI_ADJUST_CFA_OFFSET -8
408 CFI_REGISTER rip, r11
411 CFI_REGISTER rip, r15
412 FIXUP_TOP_OF_STACK %r11
414 GET_THREAD_INFO(%rcx)
415 bt $TIF_IA32,threadinfo_flags(%rcx)
418 RESTORE_TOP_OF_STACK %r11
420 CFI_REGISTER rip, r11
423 CFI_ADJUST_CFA_OFFSET 8
424 CFI_REL_OFFSET rip, 0
431 jmp int_ret_from_sys_call
435 * sigreturn is special because it needs to restore all registers on return.
436 * This cannot be done with SYSRET, so use the IRET return path instead.
438 ENTRY(stub_rt_sigreturn)
441 CFI_ADJUST_CFA_OFFSET -8
444 FIXUP_TOP_OF_STACK %r11
445 call sys_rt_sigreturn
446 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
448 jmp int_ret_from_sys_call
/* CFI "_frame" helper (fragment -- the .macro line itself is missing from
   this excerpt): declares initial frame state, with \ref the offset of the
   hardware frame relative to pt_regs. */
452 * initial frame state for interrupts and exceptions
456 CFI_DEF_CFA rsp,SS+8-\ref
457 /*CFI_REL_OFFSET ss,SS-\ref*/
458 CFI_REL_OFFSET rsp,RSP-\ref
459 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
460 /*CFI_REL_OFFSET cs,CS-\ref*/
461 CFI_REL_OFFSET rip,RIP-\ref
464 /* initial frame state for interrupts (and exceptions without error code) */
465 #define INTR_FRAME _frame RIP
466 /* initial frame state for exceptions with error code (and interrupts with
467 vector already pushed) */
468 #define XCPT_FRAME _frame ORIG_RAX
471 * Interrupt entry/exit.
473 * Interrupt entry points save only callee clobbered registers in fast path.
475 * Entry runs with interrupts off.
478 /* 0(%rsp): interrupt number */
/* Common interrupt body: save registers, optionally establish a frame
   pointer for debug info, switch to the per-cpu irq stack, call \func.
   NOTE(review): .endm and several instructions are missing from view. */
479 .macro interrupt func
481 #ifdef CONFIG_DEBUG_INFO
485 * Setup a stack frame pointer. This allows gdb to trace
486 * back to the original stack.
489 CFI_DEF_CFA_REGISTER rbp
492 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
/* Only switch to the irq stack at the first nesting level (pda_irqcount);
   cmove takes the new stack only when the preceding compare matched. */
497 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
498 movq %gs:pda_irqstackptr,%rax
499 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
500 pushq %rdi # save old stack
501 CFI_ADJUST_CFA_OFFSET 8
/* Common interrupt entry and the "retint" exit paths (fragment).
   NOTE(review): several labels referenced below (ret_from_intr,
   iret_label, bad_iret, retint_careful/signal/kernel) are not visible in
   this excerpt. */
505 ENTRY(common_interrupt)
508 /* 0(%rsp): oldrsp-ARGOFFSET */
511 CFI_ADJUST_CFA_OFFSET -8
513 decl %gs:pda_irqcount
514 #ifdef CONFIG_DEBUG_INFO
516 CFI_DEF_CFA_REGISTER rsp
/* Leave the irq stack: %rdi holds the saved pre-interrupt stack pointer. */
518 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
520 GET_THREAD_INFO(%rcx)
/* CPL bits of the saved CS != 0 -> the interrupt came from user space. */
521 testl $3,CS-ARGOFFSET(%rsp)
524 /* Interrupt came from user space */
526 * Has a correct top of stack, but a partial stack frame
527 * %rcx: thread info. Interrupts off.
529 retint_with_reschedule:
530 movl $_TIF_WORK_MASK,%edi
532 movl threadinfo_flags(%rcx),%edx
/* Exception-table entry: recover in bad_iret if the iret itself faults. */
544 .section __ex_table,"a"
545 .quad iret_label,bad_iret
548 /* force a signal here? this matches i386 behaviour */
549 /* running with kernel gs */
551 movq $-9999,%rdi /* better code? */
555 /* edi: workmask, edx: work */
558 bt $TIF_NEED_RESCHED,%edx
562 CFI_ADJUST_CFA_OFFSET 8
565 CFI_ADJUST_CFA_OFFSET -8
566 GET_THREAD_INFO(%rcx)
571 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
/* Mark "not a syscall" before delivering the signal. */
575 movq $-1,ORIG_RAX(%rsp)
576 xorl %esi,%esi # oldset
577 movq %rsp,%rdi # &pt_regs
578 call do_notify_resume
581 movl $_TIF_NEED_RESCHED,%edi
582 GET_THREAD_INFO(%rcx)
585 #ifdef CONFIG_PREEMPT
586 /* Returning to kernel space. Check if we need preemption */
587 /* rcx: threadinfo. interrupts off. */
/* Preempt only when all three hold: preempt_count is zero, NEED_RESCHED
   is set, and the interrupted context had interrupts enabled
   (EFLAGS bit 9 = IF). */
590 cmpl $0,threadinfo_preempt_count(%rcx)
591 jnz retint_restore_args
592 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
593 jnc retint_restore_args
594 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
595 jnc retint_restore_args
596 call preempt_schedule_irq
/* APIC interrupt stubs: each entry pushes its vector and funnels into the
   common interrupt machinery with the given C handler (\func).
   NOTE(review): .endm lines and parts of the ENTRY bodies are missing
   from this excerpt. */
604 .macro apicinterrupt num,func
607 CFI_ADJUST_CFA_OFFSET 8
613 ENTRY(thermal_interrupt)
614 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
616 ENTRY(threshold_interrupt)
617 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
620 ENTRY(reschedule_interrupt)
621 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
/* One TLB-invalidate IPI entry per vector in the invalidate range. */
623 .macro INVALIDATE_ENTRY num
624 ENTRY(invalidate_interrupt\num)
625 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
637 ENTRY(call_function_interrupt)
638 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
641 #ifdef CONFIG_X86_LOCAL_APIC
642 ENTRY(apic_timer_interrupt)
643 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
645 ENTRY(error_interrupt)
646 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
648 ENTRY(spurious_interrupt)
649 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
/* Exception entry macros and the common error_entry path (fragment).
   zeroentry pushes a dummy 0 error code; errorentry relies on the
   CPU-pushed error code; paranoidentry handles NMI-like traps that can
   arrive with either user or kernel gs. NOTE(review): .macro/.endm lines
   and many instructions are missing from this excerpt. */
653 * Exception entry points.
657 pushq $0 /* push error code/oldrax */
658 CFI_ADJUST_CFA_OFFSET 8
659 pushq %rax /* push real oldrax to the rdi slot */
660 CFI_ADJUST_CFA_OFFSET 8
666 .macro errorentry sym
669 CFI_ADJUST_CFA_OFFSET 8
675 /* error code is on the stack already */
676 /* handle NMI like exceptions that can happen everywhere */
680 .macro paranoidentry sym, ist=0
/* Read MSR_GS_BASE to decide whether swapgs is needed (paranoid check). */
684 movl $MSR_GS_BASE,%ecx
692 movq %gs:pda_data_offset, %rbp
695 movq ORIG_RAX(%rsp),%rsi
696 movq $-1,ORIG_RAX(%rsp)
/* For IST exceptions: shift the TSS IST slot down around the handler call
   so a recursive trap of the same kind gets a fresh stack, then restore. */
698 subq $EXCEPTION_STACK_SIZE, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
702 addq $EXCEPTION_STACK_SIZE, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
708 * Exception entry point. This expects an error code/orig_rax on the stack
709 * and the exception handler in %rax.
713 /* rdi slot contains rax, oldrax contains error code */
716 CFI_ADJUST_CFA_OFFSET (14*8)
718 CFI_REL_OFFSET rsi,RSI
/* Recover the real %rax value that was parked in the rdi slot. */
719 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
721 CFI_REL_OFFSET rdx,RDX
723 CFI_REL_OFFSET rcx,RCX
724 movq %rsi,10*8(%rsp) /* store rax */
725 CFI_REL_OFFSET rax,RAX
731 CFI_REL_OFFSET r10,R10
733 CFI_REL_OFFSET r11,R11
735 CFI_REL_OFFSET rbx,RBX
737 CFI_REL_OFFSET rbp,RBP
739 CFI_REL_OFFSET r12,R12
741 CFI_REL_OFFSET r13,R13
743 CFI_REL_OFFSET r14,R14
745 CFI_REL_OFFSET r15,R15
754 movq ORIG_RAX(%rsp),%rsi /* get error code */
755 movq $-1,ORIG_RAX(%rsp)
757 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
762 GET_THREAD_INFO(%rcx)
765 movl threadinfo_flags(%rcx),%edx
766 movl $_TIF_WORK_MASK,%edi
776 /* There are two places in the kernel that can potentially fault with
777 usergs. Handle them here. The exception handlers after
778 iret run with kernel gs again, so don't set the user space flag.
779 B stepping K8s sometimes report an truncated RIP for IRET
780 exceptions returning to compat mode. Check for these here too. */
781 leaq iret_label(%rip),%rbp
/* Compare only the low 32 bits of RIP (the K8 truncated-RIP case above). */
784 movl %ebp,%ebp /* zero extend */
787 cmpq $gs_change,RIP(%rsp)
791 /* Reload gs selector with exception handling */
792 /* edi: new selector */
796 CFI_ADJUST_CFA_OFFSET 8
801 2: mfence /* workaround */
804 CFI_ADJUST_CFA_OFFSET -8
/* Exception-table entry: a faulting gs load resumes at bad_gs. */
808 .section __ex_table,"a"
810 .quad gs_change,bad_gs
813 /* running with kernelgs */
815 swapgs /* switch back to user gs */
/* kernel_thread() and execve() helpers (fragment).
   NOTE(review): the ENTRY labels and most instruction bodies are missing
   from this excerpt; only scattered lines remain. */
822 * Create a kernel thread.
824 * C extern interface:
825 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
827 * asm input arguments:
828 * rdi: fn, rsi: arg, rdx: flags
/* Build a fake interrupt frame whose RIP is child_rip, so the new thread
   "returns" into the trampoline. */
832 FAKE_STACK_FRAME $child_rip
835 # rdi: flags, rsi: usp, rdx: will be &pt_regs
/* Merge the caller's flags with the fixed kernel-thread clone flags. */
837 orq kernel_thread_flags(%rip),%rdi
850 * It isn't worth to check for reschedule here,
851 * so internally to the x86_64 port you can rely on kernel_thread()
852 * not to reschedule the child before returning, this avoids the need
853 * of hacks for example to fork off the per-CPU idle tasks.
854 * [Hopefully no generic code relies on the reschedule -AK]
864 * Here we are in the child and the registers are set as they were
865 * at kernel_thread() invocation in the parent.
875 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
877 * C extern interface:
878 * extern long execve(char *name, char **argv, char **envp)
880 * asm input arguments:
881 * rdi: name, rsi: argv, rdx: envp
883 * We want to fallback into:
884 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
886 * do_sys_execve asm fallback arguments:
887 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
897 je int_ret_from_sys_call
/* Exception vector entry points, the paranoid exit path, and the
   call_softirq fragment. NOTE(review): several ENTRY/KPROBE labels,
   jumps and macro terminators are missing from this excerpt. */
903 KPROBE_ENTRY(page_fault)
904 errorentry do_page_fault
907 ENTRY(coprocessor_error)
908 zeroentry do_coprocessor_error
910 ENTRY(simd_coprocessor_error)
911 zeroentry do_simd_coprocessor_error
913 ENTRY(device_not_available)
914 zeroentry math_state_restore
916 /* runs on exception stack */
/* #DB is dispatched via paranoidentry on its dedicated IST stack. */
920 CFI_ADJUST_CFA_OFFSET 8
921 paranoidentry do_debug, DEBUG_IST
926 /* runs on exception stack */
930 CFI_ADJUST_CFA_OFFSET 8
933 * "Paranoid" exit path from exception stack.
934 * Paranoid because this is used by NMIs and cannot take
935 * any kernel state for granted.
936 * We don't do kernel preemption checks here, because only
937 * NMI should be common and it does not enable IRQs and
938 * cannot get reschedule ticks.
940 /* ebx: no swapgs flag */
/* %ebx was set at entry by paranoidentry: nonzero means gs is already the
   kernel gs and no swapgs is required on the way out. */
942 testl %ebx,%ebx /* swapgs needed? */
945 jnz paranoid_userspace
952 GET_THREAD_INFO(%rcx)
953 movl threadinfo_flags(%rcx),%ebx
954 andl $_TIF_WORK_MASK,%ebx
956 movq %rsp,%rdi /* &pt_regs */
/* Cannot schedule on the exception stack: move to the process stack first. */
958 movq %rax,%rsp /* switch stack for scheduling */
959 testl $_TIF_NEED_RESCHED,%ebx
960 jnz paranoid_schedule
961 movl %ebx,%edx /* arg3: thread flags */
963 xorl %esi,%esi /* arg2: oldset */
964 movq %rsp,%rdi /* arg1: &pt_regs */
965 call do_notify_resume
967 jmp paranoid_userspace
972 jmp paranoid_userspace
978 CFI_ADJUST_CFA_OFFSET 8
/* #BP (int3) shares the debug IST stack. */
979 paranoidentry do_int3, DEBUG_IST
985 zeroentry do_overflow
991 zeroentry do_invalid_op
993 ENTRY(coprocessor_segment_overrun)
994 zeroentry do_coprocessor_segment_overrun
997 zeroentry do_reserved
999 /* runs on exception stack */
1002 paranoidentry do_double_fault
1007 errorentry do_invalid_TSS
1009 ENTRY(segment_not_present)
1010 errorentry do_segment_not_present
1012 /* runs on exception stack */
1013 ENTRY(stack_segment)
1015 paranoidentry do_stack_segment
1019 KPROBE_ENTRY(general_protection)
1020 errorentry do_general_protection
1023 ENTRY(alignment_check)
1024 errorentry do_alignment_check
1027 zeroentry do_divide_error
1029 ENTRY(spurious_interrupt_bug)
1030 zeroentry do_spurious_interrupt_bug
1032 #ifdef CONFIG_X86_MCE
1033 /* runs on exception stack */
1034 ENTRY(machine_check)
1037 CFI_ADJUST_CFA_OFFSET 8
1038 paranoidentry do_machine_check
/* call_softirq (fragment): run the softirq work on the per-cpu irq stack,
   tracked via pda_irqcount. */
1045 movq %gs:pda_irqstackptr,%rax
1047 CFI_DEF_CFA_REGISTER rdx
1048 incl %gs:pda_irqcount
1051 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1054 CFI_DEF_CFA_REGISTER rsp
1055 decl %gs:pda_irqcount