/*
 *  linux/arch/i386/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"
#define nr_syscalls ((syscall_table_size)/4)
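/*
 * syscall_table_size is computed at the bottom of this file as the byte
 * length of sys_call_table; every entry is a 4-byte pointer on i386, so
 * dividing by 4 gives the number of system calls.
 */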
/* These are replacements for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax
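/*
 * Note: routing the privileged instructions (cli/sti/iret/sysexit and the
 * CR0 read) through the macros above keeps them in one place, so a
 * paravirtualized build can presumably substitute hypervisor-friendly
 * versions without touching the entry paths themselves.
 */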
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#define resume_kernel		restore_nocheck
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)     # interrupts off?
#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
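/*
 * The CFI_ADJUST_CFA_OFFSET/CFI_REL_OFFSET pairs below accompany the
 * SAVE_ALL register pushes: each push grows the frame by 4 bytes and
 * records where the saved register lives, so the DWARF unwinder can find
 * every member of struct pt_regs.
 */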
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \

#define RESTORE_INT_REGS \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\

#define RESTORE_REGS	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_ADJUST_CFA_OFFSET -4;\
.section .fixup,"ax"; \
.section __ex_table,"a";\

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP
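/*
 * The three RING0_*_FRAME macros above describe the possible kernel entry
 * stack layouts to the DWARF unwinder: an interrupt frame with no error
 * code (3 words), an exception frame that includes an error code (4 words),
 * and a frame that already holds a full struct pt_regs.
 */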
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET -4

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	testl $(VM_MASK | 3), %eax

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?

#ifdef CONFIG_PREEMPT
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	call preempt_schedule_irq
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable them straight after entry:
	 */
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
.section __ex_table,"a"
	.long 1b,syscall_fault
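/*
 * The __ex_table entry above pairs the user-space load of that sixth
 * argument (local label 1) with syscall_fault, so a bad %ebp coming from
 * user space is turned into an -EFAULT return instead of an oops.
 */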
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl OLDESP(%esp), %ecx
	ENABLE_INTERRUPTS_SYSEXIT
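/*
 * sysexit returns to ring 3 with the user %eip taken from %edx and the
 * user %esp taken from %ecx, which is why the fast path above reloads
 * %ecx from OLDESP(%esp) (and %edx from the saved EIP) right before
 * ENABLE_INTERRUPTS_SYSEXIT.
 */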
	# system call handler stub
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
	# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	je ldt_ss			# returning to user-space with LDT SS
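	/*
	 * The mask above mixes three tests: VM_MASK (returning to vm86
	 * mode), bit 2 of the saved SS selector shifted into %ah (the TI
	 * bit, i.e. an LDT-based stack segment), and the low two bits of
	 * %eax holding the saved CS RPL.  Only a ring-3 return on an LDT
	 * stack segment takes the slow ldt_ss path.
	 */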
restore_nocheck_notrace:
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0			# no error code
.section __ex_table,"a"

	larl OLDSS(%esp), %eax
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
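	/*
	 * Rough idea of the workaround: copy the iret frame onto a small
	 * per-CPU stack addressed through the 16-bit __ESPFIX_SS segment
	 * and return from there, so the stale high word left in %esp no
	 * longer refers to the kernel stack.  setup_x86_bogus_stack below
	 * builds that frame.
	 */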
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	lss 20+4(%esp), %esp		# switch to 16bit stack
.section __ex_table,"a"

	# perform work that needs to be done immediately before resumption
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
	testb $_TIF_NEED_RESCHED, %cl

	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	testb $_TIF_NEED_RESCHED, %cl
work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	call do_notify_resume
	jmp resume_userspace_sig
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	CFI_ADJUST_CFA_OFFSET -4
	call do_notify_resume
	jmp resume_userspace_sig
	# perform syscall entry tracing
	movl $-ENOSYS,EAX(%esp)
	call do_syscall_trace
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	# perform syscall exit tracing
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	call do_syscall_trace
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	movl $-ENOSYS,EAX(%esp)

#define FIXUP_ESPFIX_STACK \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \

#define UNWIND_ESPFIX_STACK \
	CFI_ADJUST_CFA_OFFSET 4; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
ENTRY(irq_entries_start)
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET 4

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */

#define BUILD_INTERRUPT(name, nr)	\
	CFI_ADJUST_CFA_OFFSET 4;	\

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
KPROBE_ENTRY(page_fault)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception
KPROBE_END(page_fault)
ENTRY(coprocessor_error)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(simd_coprocessor_error)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0
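/*
 * The offset argument compensates for the words the trap has already
 * pushed on top of the sysenter stack: 12 for a direct hit on the
 * sysenter instruction (eflags, cs, eip), 24 when an NMI lands on top of
 * a debug-trap frame (see nmi_debug_stack_check below).
 */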
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
	CFI_ADJUST_CFA_OFFSET 4
	cmpw $__ESPFIX_SS, %ax
	CFI_ADJUST_CFA_OFFSET -4
	cmpl $sysenter_entry,(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check

	/* We have a RING0_INT_FRAME here */
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp restore_nocheck_notrace

	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	/* copy the iret frame of 12 bytes */
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	FIXUP_ESPFIX_STACK		# %eax == %esp
	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
	xorl %edx,%edx			# zero error code
	lss 12+4(%esp), %esp		# back to 16bit stack
.section __ex_table,"a"

	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception

	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
ENTRY(coprocessor_segment_overrun)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4

	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(segment_not_present)
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4

	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4

KPROBE_ENTRY(general_protection)
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
KPROBE_END(general_protection)

ENTRY(alignment_check)
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4

	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4

#ifdef CONFIG_X86_MCE
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(spurious_interrupt_bug)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	movl	$__USER_DS, DS(%edx)
	movl	$__USER_DS, ES(%edx)
	movl	%ebx, ORIG_EAX(%edx)
	movl	$__KERNEL_CS, CS(%edx)
	movl	%ebx, EFLAGS(%edx)
	movl	%eax, OLDESP(%edx)
	movl	$__KERNEL_DS, OLDSS(%edx)
ENDPROC(arch_unwind_init_running)

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
ENDPROC(kernel_thread_helper)

#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)