[pandora-kernel.git] arch/sh/mm/fault_32.c
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2008  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_counter.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

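/*
 * Give a registered kprobes fault handler first crack at a kernel-mode
 * fault.  Returns non-zero if kprobes handled it and the normal fault
 * path should be skipped.
 */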
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

#ifdef CONFIG_KPROBES
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }
#endif

        return ret;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to the appropriate routine.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int si_code;
        int fault;
        siginfo_t info;

        /*
         * We don't bother with any notifier callbacks here, as they are
         * all handled through the __do_page_fault() fast-path.
         */

        tsk = current;
        si_code = SEGV_MAPERR;

        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);

                if (!pud_present(*pud)) {
                        if (!pud_present(*pud_k))
                                goto bad_area_nosemaphore;
                        set_pud(pud, *pud_k);
                        return;
                }

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);

                return;
        }

        mm = tsk->mm;

        if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();

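        /* Account the fault with the perf software counters. */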
        perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

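        /* Find the vma that covers (or can grow down to cover) the address. */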
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
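        /* Check that the access is permitted by the vma protections. */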
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
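        /* Account the fault as major or minor and feed the perf counters. */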
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                     regs, address);
        } else {
                tsk->min_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                     regs, address);
        }

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        if (oops_may_print()) {
                unsigned long page;

                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                                          "pointer dereference");
                else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                                          "request");
                printk(" at virtual address %08lx\n", address);
                printk(KERN_ALERT "pc = %08lx\n", regs->pc);
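                /* Walk the page table at the TTB to dump the faulting pde/pte. */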
                page = (unsigned long)get_TTB();
                if (page) {
                        page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
                        printk(KERN_ALERT "*pde = %08lx\n", page);
                        if (page & _PAGE_PRESENT) {
                                page &= PAGE_MASK;
                                address &= 0x003ff000;
                                page = ((__typeof__(page) *)
                                                __va(page))[address >>
                                                            PAGE_SHIFT];
                                printk(KERN_ALERT "*pte = %08lx\n", page);
                        }
                }
        }

        die("Oops", regs, writeaccess);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_group_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        int ret = 1;

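        /*
         * ret defaults to 1 (not handled); it is cleared only if the
         * fast path below manages to refill the TLB from an existing PTE.
         */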
        /*
         * We don't take page faults for P1, P2, and parts of P4; these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        goto out;

                pgd = pgd_offset(current->mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                goto out;
        if (unlikely(writeaccess && !pte_write(entry)))
                goto out;

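        /* Mark the PTE dirty on a write access and young on any access. */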
        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);

        ret = 0;
out:
        return ret;
}