 * Copyright (C) 1991, 1992  Linus Torvalds
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 * "current" is in register %ebx during any slow entries.
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
/* Avoid __ASSEMBLY__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000
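/* With CONFIG_AUDITSYSCALL disabled, the audit fast paths below simply
   fall back to the generic syscall entry/exit tracing work: */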
#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif
.section .entry.text, "ax"
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
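 *
 * Illustrative example (exposition, not a rule stated here): a call like
 * DISABLE_INTERRUPTS(CLBR_EAX) declares %eax fair game for the
 * replacement sequence, so a short sequence that uses %eax as scratch
 * can be patched inline rather than left as an out-of-line call.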
#define nr_syscalls ((syscall_table_size)/4)
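/* sys_call_table (included near the end of this file) is an array of
 * 4-byte function pointers, so dividing its byte size by 4 yields the
 * number of system calls. */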
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel uses it only for the
 * stack canary, which gcc requires to live at %gs:20.  Read the
 * comment at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
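 *
 * Why %gs:20 matters - a sketch of what gcc emits with
 * -fstack-protector (illustrative, not code from this file):
 *
 *	movl %gs:20, %eax
 *	movl %eax, -4(%ebp)	# stash canary in the frame
 *	...function body...
 *	movl -4(%ebp), %eax
 *	xorl %gs:20, %eax	# canary still intact?
 *	jne <branch that calls __stack_chk_fail>
 *
 * so a valid canary must sit at %gs:20 whenever such code can run.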
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately push/pop can't be no-ops */
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)

	/* all the rest are no-ops */
.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg

#else	/* CONFIG_X86_32_LAZY_GS */

	/*CFI_REL_OFFSET gs, 0*/
	CFI_ADJUST_CFA_OFFSET -\pop
.pushsection .fixup, "ax"
.section __ex_table, "a"
98:	mov PT_GS(%esp), %gs
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
.section __ex_table, "a"
	/*CFI_REGISTER gs, \reg*/
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg

#endif	/* CONFIG_X86_32_LAZY_GS */
	/*CFI_REL_OFFSET fs, 0;*/
	/*CFI_REL_OFFSET es, 0;*/
	/*CFI_REL_OFFSET ds, 0;*/
	CFI_REL_OFFSET eax, 0
	CFI_REL_OFFSET ebp, 0
	CFI_REL_OFFSET edi, 0
	CFI_REL_OFFSET esi, 0
	CFI_REL_OFFSET edx, 0
	CFI_REL_OFFSET ecx, 0
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl $(__KERNEL_PERCPU), %edx
.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"
.section __ex_table, "a"
.macro RING0_INT_FRAME
	/*CFI_OFFSET cs, -2*4;*/
.macro RING0_EC_FRAME
	/*CFI_OFFSET cs, -2*4;*/
.macro RING0_PTREGS_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
	GET_THREAD_INFO(%ebp)
	pushl_cfi $0x0202		# Reset kernel eflags
 * Interrupt exit functions should be protected against kprobes
.pushsection .kprobes.text, "ax"
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)
resume_userspace_sig:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
 * We can be coming here from a syscall done in the kernel space,
 * e.g. a failed kernel_execve().
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
	jb resume_kernel		# not returning to v8086 or userspace
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0, TI_preempt_count(%ebp)	# non-zero preempt_count ?
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call preempt_schedule_irq
 * End of kprobes section
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */
	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp), %esp
 * Interrupts are disabled here, but we can't trace that until we have
 * enough kernel state for TRACE_IRQS_OFF to be callable - and we
 * immediately enable interrupts at that point anyway.
	/*CFI_REL_OFFSET ss, 0*/
	CFI_REL_OFFSET esp, 0
	orl $X86_EFLAGS_IF, (%esp)
	/*CFI_REL_OFFSET cs, 0*/
 * Push current_thread_info()->sysenter_return to the stack.
 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 * pushed above; +8 corresponds to copy_thread's esp0 setting.
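 *
 * Worked offset arithmetic (assuming thread_info sits at the bottom of
 * the THREAD_SIZE-aligned stack, as it does here): at this point
 * %esp == stack_top - 8 - 4*4, so adding 8 + 4*4 - THREAD_SIZE moves
 * back to the thread_info base, and +TI_sysenter_return then selects
 * the sysenter_return field.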
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0
	ENABLE_INTERRUPTS(CLBR_NONE)
 * Load the potential sixth argument from user stack.
 * Careful about security.
	cmpl $__PAGE_OFFSET-3, %ebp
	movl %ebp, PT_EBP(%esp)
.section __ex_table, "a"
	.long 1b, syscall_fault
	GET_THREAD_INFO(%ebp)
	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax, PT_EAX(%esp)
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz syscall_trace_entry
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	4th arg: 2nd syscall arg */
	movl %ebx, %ecx			/* 3rd arg: 1st syscall arg */
	movl %eax, %edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386, %eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	movl PT_EAX(%esp), %eax		/* reload syscall number */
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax, %edx			/* second arg, syscall return value */
	cmpl $0, %eax			/* is it < 0? */
	setl %al			/* 1 if so, 0 if not */
	movzbl %al, %eax		/* zero-extend that */
	inc %eax			/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
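	/*
	 * Worked example for the mapping above: a syscall returning
	 * -EFAULT is negative, so setl/movzbl yield 1 and the inc
	 * makes it 2 (AUDITSC_FAILURE); a zero or positive return
	 * yields 0, incremented to 1 (AUDITSC_SUCCESS).
	 */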
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp), %eax		/* reload syscall return value */
.pushsection .fixup, "ax"
2:	movl $0, PT_FS(%esp)
.section __ex_table, "a"
ENDPROC(ia32_sysenter_target)
 * syscall stub including irq exit should be protected against kprobes
.pushsection .kprobes.text, "ax"
	# system call handler stub
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax, PT_EAX(%esp)		# store the return value
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss			# returning to user-space with LDT SS
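	# Worked example: returning to user space with vm86 off, an SS
	# selector in the LDT (TI bit set) and a user CS (RPL 3), the
	# masked %eax is (SEGMENT_LDT << 8) | USER_RPL == 0x0403, so the
	# compare matches and we take the ldt_ss path.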
	RESTORE_REGS 4			# skip orig_eax/error_code
	pushl $0			# no error code
.section __ex_table, "a"
	.long irq_return, iret_exc
#ifdef CONFIG_PARAVIRT
 * The kernel can't run on a non-flat stack if paravirt mode
 * is active.  Rather than try to fix up the high bits of
 * ESP, bypass this code entirely.  This may break DOSemu
 * and/or Wine support in a paravirt VM, although the option
 * is still available to implement the setting of the high
 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	cmpl $0, pv_info+PARAVIRT_enabled
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack.  The CPU will not
 * restore the high word of ESP for us on executing iret...  This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy.  We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
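 *
 * Worked example (illustrative numbers): with kernel %esp 0xc0123f40
 * and userspace ESP 0x0000ffd8, %eax becomes 0x00003f40 (user high
 * word, kernel low word) and the offset %edx = 0xc0120000 goes into
 * the high base bytes of the ESPFIX GDT entry.  Segment base
 * 0xc0120000 + new ESP 0x00003f40 still addresses the kernel stack,
 * while ESP's high word now matches userspace's, so an iret that
 * restores only the low 16 bits still produces the right user ESP.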
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx			/* move the offset's high word into %dx */
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	# perform work that needs to be done immediately before resumption
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
	testb $_TIF_NEED_RESCHED, %cl
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	testb $_TIF_NEED_RESCHED, %cl
work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	call do_notify_resume
	jmp resume_userspace_sig
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	call do_notify_resume
	jmp resume_userspace_sig
	# perform syscall entry tracing
	movl $-ENOSYS, PT_EAX(%esp)
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(nr_syscalls), %eax
END(syscall_trace_entry)
	# perform syscall exit tracing
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	call syscall_trace_leave
END(syscall_exit_work)
	RING0_INT_FRAME			# can't unwind into user space anyway
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT, PT_EAX(%esp)
	jmp syscall_after_call
	jmp sysenter_after_call
 * End of kprobes section

 * System calls that need a pt_regs pointer.
#define PTREGSCALL0(name) \
#define PTREGSCALL1(name) \
	movl (PT_EBX+4)(%esp), %eax; \
#define PTREGSCALL2(name) \
	movl (PT_ECX+4)(%esp), %edx; \
	movl (PT_EBX+4)(%esp), %eax; \
#define PTREGSCALL3(name) \
	movl PT_EDX(%eax), %ecx; \
	movl PT_ECX(%eax), %edx; \
	movl PT_EBX(%eax), %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
ENDPROC(ptregs_##name)
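/*
 * Note on the "+4" offsets above: the stubs are reached via
 * "call *sys_call_table(,%eax,4)", which pushes a return address, so
 * inside a stub the saved pt_regs area starts at 4(%esp).
 */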
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
/* Clone is an oddball.  The 4th arg is in %edi */
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax), %ecx
	movl PT_ECX(%eax), %edx
	movl PT_EBX(%eax), %eax
	CFI_ADJUST_CFA_OFFSET -8
ENDPROC(ptregs_clone)
.macro FIXUP_ESPFIX_STACK
 * Switch back from the ESPFIX stack to the normal zero-based stack:
 *
 * We can't call C functions using the ESPFIX stack.  This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.macro UNWIND_ESPFIX_STACK
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	movl $__KERNEL_DS, %eax
	/* switch to normal stack */
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
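 *
 * Size check (illustrative): each of the first six stubs is a 2-byte
 * "pushl $imm8" plus a 2-byte short jump to the shared 5-byte
 * "jmp common_interrupt"; the seventh falls through to it, giving
 * 6*4 + 2 + 5 = 31 bytes per 32-byte chunk.  The ~vector+0x80
 * encoding keeps the operand in signed-byte range (e.g. vector 0x31
 * -> ~0x31+0x80 = 0x4e), which is what makes the 2-byte push form
 * possible; common_interrupt then undoes the +0x80 bias.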
.section .init.rodata, "a"
.section .entry.text, "ax"
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
.if vector < NR_VECTORS
.if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
.section .entry.text, "ax"
2:	jmp common_interrupt
END(irq_entries_start)
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl $-0x80, (%esp)		/* Adjust vector into the [-256,-1] range */
ENDPROC(common_interrupt)
 * Irq entries should be protected against kprobes
.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
	pushl_cfi $do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
.section .altinstructions, "a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.section .altinstr_replacement, "ax"
663:	pushl $do_simd_coprocessor_error
	pushl_cfi $do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
END(device_not_available)
#ifdef CONFIG_PARAVIRT
.section __ex_table, "a"
	.long native_iret, iret_exc
ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)
	pushl_cfi $do_overflow
	pushl_cfi $do_invalid_op

ENTRY(coprocessor_segment_overrun)
	pushl_cfi $do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl_cfi $do_invalid_TSS

ENTRY(segment_not_present)
	pushl_cfi $do_segment_not_present
END(segment_not_present)

	pushl_cfi $do_stack_segment

ENTRY(alignment_check)
	pushl_cfi $do_alignment_check

	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error

#ifdef CONFIG_X86_MCE
	pushl_cfi machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl_cfi $do_spurious_interrupt_bug
END(spurious_interrupt_bug)
 * End of kprobes section

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	ud2				# padding for call trace
ENDPROC(kernel_thread_helper)
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	addl $5*4, %esp			/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
ENTRY(xen_hypervisor_callback)
	pushl_cfi $-1			/* orig_ax = -1 => not a system call */

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates the
	   iret instruction's behaviour, where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp), %eax
	cmpl $xen_iret_start_crit, %eax
	cmpl $xen_iret_end_crit, %eax
	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
	call xen_evtchn_do_upcall
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error.  We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
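# Illustrative category-1 flow: a segment reload faults, the fixup
# reattempts it with the segment register zeroed, leaving EAX == 0, and
# the status check below routes us through the ordinary
# ret_from_exception path with the bad selector cleared.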
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	CFI_ADJUST_CFA_OFFSET -16
5:	pushl_cfi $-1			/* orig_ax = -1 => not a system call */
	jmp ret_from_exception
.section .fixup, "ax"
.section __ex_table, "a"
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		 xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
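	# Sketch of the argument setup above: 0xc(%esp) is mcount's own
	# return address (three registers are pushed on entry, elided in
	# this excerpt), so after subtracting MCOUNT_INSN_SIZE %eax points
	# at the call site inside the traced function; 0x4(%ebp) is the
	# parent's return address, taken from the caller's frame.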
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl $0, function_trace_stop
	cmpl $ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller
	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
	/* taken from glibc */
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
	call *ftrace_trace_function
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %edx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
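	# prepare_ftrace_return() hooks the traced function's return
	# address so that it returns through return_to_handler (below),
	# letting the function's exit be traced as well.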
END(ftrace_graph_caller)

.globl return_to_handler
	call ftrace_return_to_handler
.section .rodata, "a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)
 * Some functions should be protected against kprobes
.pushsection .kprobes.text, "ax"
	pushl_cfi $do_page_fault
	/* the function address is in %gs's slot on the stack */
	/*CFI_REL_OFFSET fs, 0*/
	/*CFI_REL_OFFSET es, 0*/
	/*CFI_REL_OFFSET ds, 0*/
	CFI_REL_OFFSET eax, 0
	CFI_REL_OFFSET ebp, 0
	CFI_REL_OFFSET edi, 0
	CFI_REL_OFFSET esi, 0
	CFI_REL_OFFSET edx, 0
	CFI_REL_OFFSET ecx, 0
	CFI_REL_OFFSET ebx, 0
	movl $(__KERNEL_PERCPU), %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	movl $(__USER_DS), %ecx
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
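 *
 * Offset bookkeeping (sketch): \offset is the number of bytes already
 * pushed on top of the sysenter stack pointer - 12 for the single
 * eflags/cs/eip frame described above, 24 on the NMI-after-debug path
 * where a second such frame is already present (see FIX_STACK 24
 * below).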
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
	cmpl $ia32_sysenter_target, (%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	xorl %edx, %edx			# error code 0
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
	cmpw $__ESPFIX_SS, %ax
	cmpl $ia32_sysenter_target, (%esp)
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1), %eax
	cmpl $(THREAD_SIZE-20), %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target, 12(%esp)
	je nmi_debug_stack_check
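	# The THREAD_SIZE-20 bound keeps the 12(%esp) word checked above
	# and the 16(%esp) word checked in nmi_debug_stack_check inside
	# the current stack page (the highest byte touched is %esp+19).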
	/* We have a RING0_INT_FRAME here */
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer
	jmp restore_all_notrace
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct
nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS, 16(%esp)
	jne nmi_stack_correct
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn, (%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx, %edx			# zero error code
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	pushl_cfi $-1			# mark this as an int
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception
ENTRY(general_protection)
	pushl_cfi $do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl_cfi $do_async_page_fault
END(async_page_fault)

 * End of kprobes section