/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor-flags.h>
#include <asm/arch_hooks.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/traps.h>

#include "mach_traps.h"
#include "cpu/mcheck/mce.h"

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
        __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
static int ignore_nmis;
static int die_counter;

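/*
 * Re-enable interrupts only if they were enabled in the context that
 * raised the trap, so the handler runs with the same IF state as the
 * interrupted code.
 */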
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
        unsigned long offset = 0;
        unsigned long symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[KSYM_NAME_LEN];
        char reliab[4] = "";

        symname = kallsyms_lookup(address, &symsize, &offset,
                                        &modname, namebuf);
        if (!symname) {
                printk(" [<%08lx>]\n", address);
                return;
        }
        if (!reliable)
                strcpy(reliab, "? ");

        if (!modname)
                modname = delim = "";
        printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
                address, reliab, delim, modname, delim, symname, offset, symsize);
#else
        printk(" [<%08lx>]\n", address);
#endif
}

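/*
 * A stack slot is valid if it lies above the thread_info at the base
 * of the stack and leaves room for a 'size'-byte read below the top
 * of the THREAD_SIZE-aligned stack area.
 */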
static inline int valid_stack_ptr(struct thread_info *tinfo,
                        void *p, unsigned int size)
{
        void *t = tinfo;
        return p > t && p <= t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};

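/*
 * Scan the stack word by word and print everything that looks like a
 * kernel text address.  With frame pointers, the word at bp + 4 is a
 * genuine return address and is reported as reliable; anything else
 * is printed as a guess.
 */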
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + 4) {
                                ops->address(data, addr, 1);
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, bp == 0);
                        }
                }
                stack++;
        }
        return bp;
}

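/*
 * Walk the chain of stacks: each THREAD_SIZE-aligned stack has its
 * thread_info at the base, and previous_esp links an interrupt stack
 * back to the stack it interrupted.
 */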
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!bp) {
                if (task == current) {
                        /* Grab bp right from our regs */
                        asm("movl %%ebp, %0" : "=r" (bp) :);
                } else {
                        /* bp is the last reg pushed by switch_to */
                        bp = *(unsigned long *) task->thread.sp;
                }
        }
#endif

        for (;;) {
                struct thread_info *context;

                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                bp = print_context_stack(context, stack, bp, ops, data);
                /*
                 * Should be after the line below, but somewhere
                 * in early boot context comes out corrupted and we
                 * can't reference it:
                 */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long *)context->previous_esp;
                if (!stack)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk(data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        if (!reliable)
                printk("? ");
        print_symbol("%s\n", addr);
        touch_nmi_watchdog();
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp, char *log_lvl)
{
        dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp)
{
        show_trace_log_lvl(task, regs, stack, bp, "");
}

static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long *sp, unsigned long bp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);

        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        printk("       ");
        show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long bp = 0;
        unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
        if (!bp)
                asm("movl %%ebp, %0" : "=r" (bp):);
#endif

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

        show_trace(current, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;

        print_modules();
        __show_registers(regs, 0);

        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, task_pid_nr(current),
                current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault.
         */
        if (!user_mode_vm(regs)) {
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at EIP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

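/*
 * BUG() is implemented as a ud2 instruction (opcode 0x0f 0x0b, which
 * reads as 0x0b0f through a little-endian 16-bit load).  Verify that
 * the reported address really contains one.
 */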
int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}

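/*
 * die_lock serializes oops output across CPUs.  A CPU that oopses
 * while it already owns the lock (a recursive die()) only bumps
 * die_nest_count instead of deadlocking on the lock.
 */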
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        unsigned long flags;

        oops_enter();

        if (die_owner != raw_smp_processor_id()) {
                console_verbose();
                raw_local_irq_save(flags);
                __raw_spin_lock(&die_lock);
                die_owner = smp_processor_id();
                die_nest_count = 0;
                bust_spinlocks(1);
        } else {
                raw_local_irq_save(flags);
        }
        die_nest_count++;
        return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE);
        __raw_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        unsigned short ss;
        unsigned long sp;

        printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        if (notify_die(DIE_OOPS, str, regs, err,
                        current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
                return 1;

        show_registers(regs);
        /* Executive summary in case the oops scrolled away */
        sp = (unsigned long) (&regs->sp);
        savesegment(ss, ss);
        if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
        }
        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
        print_symbol("%s", regs->ip);
        printk(" SS:ESP %04x:%08lx\n", ss, sp);
        return 0;
}

/*
 * This is the path taken when something in the kernel has done something
 * bad and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        if (die_nest_count < 3) {
                report_bug(regs->ip, regs);

                if (__die(str, regs, err))
                        regs = NULL;
        } else {
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
        }

        oops_end(flags, regs, SIGSEGV);
}

static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (regs->flags & X86_VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

trap_signal:
        /*
         * We want error_code and trap_no set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that does not pollute the
         * information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;

vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                                error_code, trapnr))
                goto trap_signal;
        return;
}

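/*
 * The macros below expand to the actual do_<name> trap entry points.
 * The _INFO variants fill in a siginfo_t for the signal, the VM86
 * variants forward the trap to the vm86 code when it was raised in
 * virtual-8086 mode, and the *ERROR* variants re-enable interrupts
 * via conditional_sti() before delivering the signal.
 */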
#define DO_TRAP(trapnr, signr, str, name)                               \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        trace_hardirqs_fixup();                                         \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL);         \
}

#define DO_TRAP_INFO(trapnr, signr, str, name, sicode, siaddr, irq)     \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        siginfo_t info;                                                 \
        if (irq)                                                        \
                local_irq_enable();                                     \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info);        \
}

#define DO_VM86_TRAP(trapnr, signr, str, name)                          \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL);         \
}

#define DO_VM86_TRAP_INFO(trapnr, signr, str, name, sicode, siaddr)     \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        trace_hardirqs_fixup();                                         \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info);        \
}

#define DO_ERROR(trapnr, signr, str, name)                              \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        trace_hardirqs_fixup();                                         \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL);         \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq)    \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        siginfo_t info;                                                 \
        if (irq)                                                        \
                local_irq_enable();                                     \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info);        \
}

#define DO_VM86_ERROR(trapnr, signr, str, name)                         \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL);         \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)    \
void do_##name(struct pt_regs *regs, long error_code)                   \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        trace_hardirqs_fixup();                                         \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info);        \
}

DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
DO_TRAP_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)

void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;
        struct thread_struct *thread;
        struct tss_struct *tss;
        int cpu;

        conditional_sti(regs);

        cpu = get_cpu();
        tss = &per_cpu(init_tss, cpu);
        thread = &current->thread;

        /*
         * Perform the lazy TSS I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap into the
         * TSS and set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map extended to higher ports
                 * than the current one, pad the extra space with 0xff
                 * (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max) {
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                }
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();

                return;
        }
        put_cpu();

        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                        printk_ratelimit()) {
                printk(KERN_INFO
                        "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG
                "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

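/*
 * An I/O check NMI: report it, clear and mask the IOCHK line by
 * setting bit 3 of system control port 0x61, and re-arm it a couple
 * of seconds later.
 */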
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Clear and disable the IOCK line, then re-enable it after a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);

        i = 2000;
        while (--i)
                udelay(1000);

        reason &= ~8;
        outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out:
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", str);
        printk(" on CPU%d, ip %08lx, registers:\n",
                smp_processor_id(), regs->ip);
        show_registers(regs);
        if (do_panic)
                panic("Non maskable interrupt");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /*
         * If we are in the kernel we are probably nested up pretty badly
         * and might as well get out now while we still can:
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}

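/*
 * NMI reason bits, read from system control port 0x61: bit 7 signals
 * a memory parity/SERR error, bit 6 an I/O check error.  If neither
 * is set, the NMI came from the LAPIC (watchdog/IPI) or is unknown.
 */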
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);
#else
                unknown_nmi_error(reason, regs);
#endif

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered:
         */
        reassert_nmi();
}

notrace __kprobes void do_nmi(struct pt_regs *regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}

void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
        trace_hardirqs_fixup();

        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /*
         * This is an interrupt gate, because kprobes wants interrupts
         * disabled. Normal trap handlers don't.
         */
        conditional_sti(regs);
#else
        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif

        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 */
void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        unsigned int condition;
        int si_code;

        trace_hardirqs_fixup();

        get_debugreg(condition, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irqs after DR6 has been saved */
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

        if (regs->flags & X86_VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        si_code = get_si_code((unsigned long)condition);
        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code, si_code);

        /*
         * Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~X86_EFLAGS_TF;
        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
void math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f are the exception bits in these regs, 0x200 is the
         * C1 bit you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000: /* No unmasked exception */
                return;
        default: /* Multiple exceptions */
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);

        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->ip);
                return;
        }
        /*
         * Handle the strange 'cache flush from user space' exception
         * in all other cases.  This is undocumented behaviour.
         */
        if (regs->flags & X86_VM_MASK) {
                handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
                return;
        }
        current->thread.trap_no = 19;
        current->thread.error_code = error_code;
        die_if_kernel("cache flush denied", regs, error_code);
        force_sig(SIGSEGV, current);
}

void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

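/*
 * Build the espfix segment used when returning to a 16-bit stack
 * segment: pick a base so that the linear address of the kernel
 * stack is unchanged (base + new_kesp == kesp) while the offset
 * itself stays small, and patch that base and limit into the
 * GDT_ENTRY_ESPFIX_SS descriptor.
 */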
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
        struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

        return new_kesp;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful: there are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        clts();                         /* Allow maths ops (or we recurse) */
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG
                "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

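/*
 * Device-not-available (#NM, trap 7): if CR0.EM is set there is no
 * FPU, so hand the instruction to the emulator; otherwise the trap
 * comes from lazy FPU switching and we just restore the math state.
 */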
void __kprobes do_device_not_available(struct pt_regs *regs, long error)
{
        if (read_cr0() & X86_CR0_EM) {
                conditional_sti(regs);
                math_emulate(0);
        } else {
                math_state_restore(); /* interrupts still off */
                conditional_sti(regs);
        }
}

#ifdef CONFIG_X86_MCE
void __kprobes do_machine_check(struct pt_regs *regs, long error)
{
        conditional_sti(regs);
        machine_check_vector(regs, error);
}
#endif

void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(0, &divide_error);
        set_intr_gate(1, &debug);
        set_intr_gate(2, &nmi);
        set_system_intr_gate(3, &int3); /* int3 can be called from all */
        set_system_intr_gate(4, &overflow); /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate(12, &stack_segment);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate(18, &machine_check);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

        if (cpu_has_fxsr) {
                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO
                        "Enabling unmasked SIMD FPU exception support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR, &system_call);

        /* Reserve all the built-in vectors and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

        set_bit(SYSCALL_VECTOR, used_vectors);

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        trap_init_hook();
}

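/* "kstack=N" boot parameter: print N words of the raw stack in dumps. */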
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);

        return 1;
}
__setup("kstack=", kstack_setup);

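/* "code_bytes=N": bytes of code to dump around EIP, capped at 8192. */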
static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;

        return 1;
}
__setup("code_bytes=", code_bytes_setup);