/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern void die_if_kernel(char *, struct pt_regs *, long);
extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */

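/*
 * send_fault_sig() delivers the signal that the fault handlers recorded in
 * current->thread (signo/code/faddr).  For user-mode faults the signal is
 * forced on the current task; for kernel-mode faults we first try the
 * exception fixup tables and only oops if no fixup exists.
 */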
int send_fault_sig(struct pt_regs *regs)
{
        siginfo_t siginfo = { 0, 0, 0, };

        siginfo.si_signo = current->thread.signo;
        siginfo.si_code = current->thread.code;
        siginfo.si_addr = (void *)current->thread.faddr;
#ifdef DEBUG
        printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
#endif

        if (user_mode(regs)) {
                force_sig_info(siginfo.si_signo,
                               &siginfo, current);
        } else {
                const struct exception_table_entry *fixup;

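                /*
                 * A kernel fault whose PC is listed in the exception tables
                 * comes from a user-space access helper and is recoverable:
                 * resume at the registered fixup address instead of oopsing.
                 */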
                /* Are we prepared to handle this kernel fault? */
                if ((fixup = search_exception_tables(regs->pc))) {
                        struct pt_regs *tregs;
                        /* Create a new four word stack frame, discarding the old
                           one.  */
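                        /* frame_extra_sizes[] holds, per CPU frame format,
                           how many bytes the hardware exception frame takes
                           beyond the short four-word (format 0) frame;
                           stkadj tells the exception return path how much
                           extra stack to discard.  */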
                        regs->stkadj = frame_extra_sizes[regs->format];
                        tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
                        tregs->vector = regs->vector;
                        tregs->format = 0;
                        tregs->pc = fixup->fixup;
                        tregs->sr = regs->sr;
                        return -1;
                }

                //if (siginfo.si_signo == SIGBUS)
                //      force_sig_info(siginfo.si_signo,
                //                     &siginfo, current);

                /*
                 * Oops. The kernel tried to access some bad page. We'll have to
                 * terminate things with extreme prejudice.
                 */
                if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
                else
                        printk(KERN_ALERT "Unable to handle kernel access");
                printk(" at virtual address %p\n", siginfo.si_addr);
                die_if_kernel("Oops", regs, 0 /*error_code*/);
                do_exit(SIGKILL);
        }

        return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
                              unsigned long error_code)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        int write, fault;

#ifdef DEBUG
        printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
                regs->sr, regs->pc, address, error_code,
                current->mm->pgd);
#endif

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto map_err;
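        /* Faults on VM_IO mappings (memory-mapped I/O) are never handled
           here; treat them as access errors.  */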
        if (vma->vm_flags & VM_IO)
                goto acc_err;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto map_err;
        if (user_mode(regs)) {
                /* Accessing the stack below usp is always a bug.  The
                   "+ 256" is there due to some instructions doing
                   pre-decrement on the stack and that doesn't show up
                   until later.  */
                if (address + 256 < rdusp())
                        goto map_err;
        }
        if (expand_stack(vma, address))
                goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
#ifdef DEBUG
        printk("do_page_fault: good_area\n");
#endif
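        /*
         * Check that the type of access (see the error_code bits documented
         * above) is allowed by the vma's protection flags.
         */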
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
                        /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto acc_err;
                        write++;
                        break;
                case 1:         /* read, present */
                        goto acc_err;
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                                goto acc_err;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

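        /* out_of_memory below jumps back here to retry the fault for init. */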
 survive:
        fault = handle_mm_fault(mm, vma, address, write);
#ifdef DEBUG
        printk("handle_mm_fault returns %d\n",fault);
#endif
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
        }
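        /* Account the fault: major faults needed I/O, minor ones did not. */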
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;

        up_read(&mm->mmap_sem);
        return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
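        /* Never kill init for running out of memory: let it yield the CPU
           and retry the fault.  */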
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }

        printk("VM: killing process %s\n", current->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);

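/*
 * Faults taken in atomic context or without a user mm, and kernel-mode
 * faults falling through from the OOM path above, end up here: record a
 * SIGBUS and let send_fault_sig() use an exception fixup or oops.
 */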
no_context:
        current->thread.signo = SIGBUS;
        current->thread.faddr = address;
        return send_fault_sig(regs);

bus_err:
        current->thread.signo = SIGBUS;
        current->thread.code = BUS_ADRERR;
        current->thread.faddr = address;
        goto send_sig;

map_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_MAPERR;
        current->thread.faddr = address;
        goto send_sig;

acc_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_ACCERR;
        current->thread.faddr = address;

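/* acc_err falls through: drop mmap_sem and deliver the recorded signal. */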
send_sig:
        up_read(&mm->mmap_sem);
        return send_fault_sig(regs);
}