/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;
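
/* When set, unhandled user-space faults are reported on the console
 * (ratelimited) via show_signal_msg() below.
 */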
int show_unhandled_signals = 1;
/* At boot time we determine these two values, which are necessary for
 * setting up the segment maps and page table entries (PTEs).
 */
int num_segmaps, num_contexts;
/* Various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
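
/* sp_banks[] is the boot-time table of physical memory banks; an entry
 * with num_bytes == 0 terminates it, which is why the loop above needs
 * no explicit bank count.
 */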
extern void sun4c_complete_all_stores(void);
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
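
/* lookup_fault() appears to be entered from low-level assembly when a
 * fault hits kernel code covered by the exception tables (the _to_/_from_
 * user-copy macros referenced below).  The table entry for the return PC
 * classifies the access, and the return value tells the caller which
 * direction of the copy its fixup can repair.  With no usable entry the
 * fault is fatal.
 */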
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;
	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros; bit 21 is the format-3 "store" bit */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;
	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros; op3 == 0x0f is SWAP, which stores too */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;
	}

	/* No usable fixup: synthesize enough of a pt_regs (PC and current
	 * PSR) for unhandled_fault() to print, then die. */
	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);
	return 0;
}
static void show_signal_msg(struct pt_regs *regs, int sig, int code,
			    unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
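
/* The line printed above looks like this (illustrative values only):
 *
 *   a.out[1542]: segfault at 0 ip 00010074 (rpc 000100c4) sp effffa40 error 1 in a.out[10000+2000]
 */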
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}
extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

/* Work out the si_addr to report: the PC itself for a text fault,
 * otherwise the effective address recomputed from the faulting
 * instruction (fetched with __get_user() when the trap came from user
 * mode, since regs->pc then points into user space). */
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}
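
/* Deliver a fault signal with an accurate si_addr derived from the
 * trapping instruction. */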
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
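
/* do_sparc_fault() is the main page-fault entry point, reached from the
 * trap entry code for both instruction (text_fault) and data access
 * faults.  The flow: repair vmalloc-area kernel faults without taking
 * any locks, otherwise look up and validate the vma under mmap_sem, let
 * handle_mm_fault() do the real work, and fall back to exception-table
 * fixups or an oops for unrecoverable kernel accesses.
 */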
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
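
	/* Every fault that gets this far is counted; the major/minor
	 * split is accounted separately after handle_mm_fault() below. */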
	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		/* A major fault required I/O; a minor fault did not. */
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			/* The memset/csum_partial_copy fixup handlers expect
			 * the fault address in %i4 and the faulting PC in
			 * %i5. */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}
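
	/* No fixup entry covers the faulting PC: the kernel touched a bad
	 * address in a context where it did not expect a fault, so oops. */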
	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		/* If the reference table has no mapping either, the access
		 * really was bogus. */
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
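
/* do_sun4c_fault() is the sun4c fast path: reference/modified-bit faults
 * on pages already present in the software page tables are serviced by
 * updating the pte and pushing it straight into the MMU; everything else
 * falls through to do_sparc_fault() above.
 */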
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			/* This mask matches the atomic LDSTUB/SWAP (and
			 * alternate-space) encodings, which store as well
			 * as load, so treat the fault as a write. */
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}
	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
		if (write) {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
			    == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_MODIFIED |
					      _SUN4C_PAGE_VALID |
					      _SUN4C_PAGE_DIRTY);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		} else {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
			    == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_VALID);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		}
	}
	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
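
/* On a window overflow or underflow trap, a register window (16 words,
 * 64 bytes, starting at %sp) must be spilled to or filled from the user
 * stack.  %sp is 8-byte aligned, so probing %sp and %sp + 0x38 touches
 * every page the save area can span; the second probe only matters when
 * the area crosses a page boundary.
 */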
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}