vm: add VM_FAULT_SIGSEGV handling support
[pandora-kernel.git] / arch / sparc / mm / fault_32.c
1 /*
2  * fault.c:  Page fault handlers for the Sparc.
3  *
4  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5  * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6  * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7  */
8
9 #include <asm/head.h>
10
11 #include <linux/string.h>
12 #include <linux/types.h>
13 #include <linux/sched.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/signal.h>
19 #include <linux/mm.h>
20 #include <linux/smp.h>
21 #include <linux/perf_event.h>
22 #include <linux/interrupt.h>
23 #include <linux/kdebug.h>
24
25 #include <asm/system.h>
26 #include <asm/page.h>
27 #include <asm/pgtable.h>
28 #include <asm/memreg.h>
29 #include <asm/openprom.h>
30 #include <asm/oplib.h>
31 #include <asm/smp.h>
32 #include <asm/traps.h>
33 #include <asm/uaccess.h>
34
35 extern int prom_node_root;
36
37 int show_unhandled_signals = 1;
38
39 /* At boot time we determine these two values necessary for setting
40  * up the segment maps and page table entries (pte's).
41  */
42
43 int num_segmaps, num_contexts;
44 int invalid_segment;
45
46 /* various Virtual Address Cache parameters we find at boot time... */
47
48 int vac_size, vac_linesize, vac_do_hw_vac_flushes;
49 int vac_entries_per_context, vac_entries_per_segment;
50 int vac_entries_per_page;
51
52 /* Return how much physical memory we have.  */
53 unsigned long probe_memory(void)
54 {
55         unsigned long total = 0;
56         int i;
57
58         for (i = 0; sp_banks[i].num_bytes; i++)
59                 total += sp_banks[i].num_bytes;
60
61         return total;
62 }
63
64 extern void sun4c_complete_all_stores(void);
65
/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
/* Dump the synchronous/asynchronous error registers passed in by the
 * trap entry code, plus the memory parity error register when one is
 * mapped, then halt back to the PROM.  Never returns.
 */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	/* Drain pending stores so the error state is stable before we report. */
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	/* Memory errors at NMI level are unrecoverable: stop the machine. */
	prom_halt();
}
83
static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

/* Report a kernel fault we could not recover from, then die.
 * Never returns: ends in die_if_kernel("Oops", regs).
 */
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		     struct pt_regs *regs)
{
	/* A fault in the first page is almost always a NULL pointer deref. */
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	/* Kernel threads have no mm; fall back to the borrowed active_mm. */
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
104
/* Resolve a fault taken inside a region covered by the exception table
 * (the copy_{to,from}_user family).  search_extables_range() classifies
 * the covering range:
 *   3 - both loads and stores are handled by the fixup
 *   1 - "_to_" macros: only one direction is fixed up
 *   2 - "_from_" macros: the other direction is fixed up
 * For cases 1 and 2 the faulting instruction at @pc is decoded (bit 21
 * of the opcode - see the per-case comments) to decide whether the
 * fixup applies.  Returns the case number when the caller should run
 * the fixup; otherwise fabricates a minimal pt_regs and oopses via
 * unhandled_fault(), never returning.
 */
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, 
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;
	
	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2; 
		break; 

	default:
		break;
	}

	/* No applicable fixup: build just enough of a pt_regs (pc/npc and
	 * the live %psr read via inline asm) for the fault report.
	 */
	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
152
/* Print a rate-limited one-line "segfault at ..." report for a fatal
 * user-space fault.  Skipped when the task handles the signal itself
 * or when the printk rate limit kicks in.
 */
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	/* A crash of init (pid 1) is serious enough for KERN_EMERG. */
	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
173
174 static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
175                                unsigned long addr)
176 {
177         siginfo_t info;
178
179         info.si_signo = sig;
180         info.si_code = code;
181         info.si_errno = 0;
182         info.si_addr = (void __user *) addr;
183         info.si_trapno = 0;
184
185         if (unlikely(show_unhandled_signals))
186                 show_signal_msg(regs, sig, info.si_code,
187                                 addr, current);
188
189         force_sig_info (sig, &info, current);
190 }
191
192 extern unsigned long safe_compute_effective_address(struct pt_regs *,
193                                                     unsigned int);
194
195 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
196 {
197         unsigned int insn;
198
199         if (text_fault)
200                 return regs->pc;
201
202         if (regs->psr & PSR_PS) {
203                 insn = *(unsigned int *) regs->pc;
204         } else {
205                 __get_user(insn, (unsigned int *) regs->pc);
206         }
207
208         return safe_compute_effective_address(regs, insn);
209 }
210
211 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
212                                       int text_fault)
213 {
214         unsigned long addr = compute_si_addr(regs, text_fault);
215
216         __do_fault_siginfo(code, sig, regs, addr);
217 }
218
/* Main page fault handler for sparc32.
 * @regs:       trap-time register state
 * @text_fault: non-zero for instruction-fetch faults (address is taken
 *              from regs->pc instead of @address)
 * @write:      non-zero when the faulting access was a store
 * @address:    faulting virtual address
 *
 * Handles vmalloc-area synchronization, normal VMA faults via
 * handle_mm_fault(), exception-table fixups for kernel accesses, and
 * signal delivery (SIGSEGV/SIGBUS) for user faults.
 */
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* Account the fault and emit the matching perf software event. */
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			/* Faults inside memset/csum_partial_copy get the
			 * fault address and pc stashed in %i4/%i5 for their
			 * fixup handlers.
			 */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}
	
	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

	/* NOTE(review): a user-mode SIGBUS falls through into the
	 * vmalloc_fault block below; it only syncs the top-level page
	 * table and returns, so it looks harmless - but confirm this
	 * fall-through is intentional.
	 */
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
407
/* sun4c page fault entry point.  Tries to service the fault locally by
 * refreshing the software PTE and the hardware segment map; anything it
 * cannot satisfy here is punted to do_sparc_fault().
 */
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long,pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			/* NOTE(review): this opcode mask appears to detect
			 * instructions that also store (so the fault must be
			 * treated as a write) - confirm against the SPARC
			 * opcode tables.
			 */
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		/* Writable and present: just mark the PTE accessed/dirty
		 * and reload it into the MMU if the segment is mapped.
		 */
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			/* IRQs off so the segment map cannot change under us. */
			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		/* Readable and present: mark accessed and reload likewise. */
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
					ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
493
494 /* This always deals with user addresses. */
495 static void force_user_fault(unsigned long address, int write)
496 {
497         struct vm_area_struct *vma;
498         struct task_struct *tsk = current;
499         struct mm_struct *mm = tsk->mm;
500         int code;
501
502         code = SEGV_MAPERR;
503
504         down_read(&mm->mmap_sem);
505         vma = find_vma(mm, address);
506         if(!vma)
507                 goto bad_area;
508         if(vma->vm_start <= address)
509                 goto good_area;
510         if(!(vma->vm_flags & VM_GROWSDOWN))
511                 goto bad_area;
512         if(expand_stack(vma, address))
513                 goto bad_area;
514 good_area:
515         code = SEGV_ACCERR;
516         if(write) {
517                 if(!(vma->vm_flags & VM_WRITE))
518                         goto bad_area;
519         } else {
520                 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
521                         goto bad_area;
522         }
523         switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
524         case VM_FAULT_SIGBUS:
525         case VM_FAULT_OOM:
526                 goto do_sigbus;
527         }
528         up_read(&mm->mmap_sem);
529         return;
530 bad_area:
531         up_read(&mm->mmap_sem);
532         __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
533         return;
534
535 do_sigbus:
536         up_read(&mm->mmap_sem);
537         __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
538 }
539
540 static void check_stack_aligned(unsigned long sp)
541 {
542         if (sp & 0x7UL)
543                 force_sig(SIGILL, current);
544 }
545
546 void window_overflow_fault(void)
547 {
548         unsigned long sp;
549
550         sp = current_thread_info()->rwbuf_stkptrs[0];
551         if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
552                 force_user_fault(sp + 0x38, 1);
553         force_user_fault(sp, 1);
554
555         check_stack_aligned(sp);
556 }
557
558 void window_underflow_fault(unsigned long sp)
559 {
560         if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
561                 force_user_fault(sp + 0x38, 0);
562         force_user_fault(sp, 0);
563
564         check_stack_aligned(sp);
565 }
566
567 void window_ret_fault(struct pt_regs *regs)
568 {
569         unsigned long sp;
570
571         sp = regs->u_regs[UREG_FP];
572         if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
573                 force_user_fault(sp + 0x38, 0);
574         force_user_fault(sp, 0);
575
576         check_stack_aligned(sp);
577 }