/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;
		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));
		if (pud_none(*pud))
			break;
		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
		if (pmd_none(*pmd))
			break;
		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */
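/*
 * For illustration only (this example is not in the original source and
 * the values are made up): with the 2-level short-descriptor format the
 * walk above builds a single console line from the printk calls, e.g.
 *
 *	pgd = c7a2c000
 *	[000083a4] *pgd=37a51831, *pte=17b5859f, *ppte=17b58aff
 *
 * "(bad)" is appended instead when an intermediate entry is corrupt,
 * and the walk stops at the first empty (none) entry.
 */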
/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
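/*
 * What fixup_exception() does, sketched from the generic ARM extable
 * code (a summary, not text from this file): every instruction that is
 * allowed to fault, e.g. a user-space load inside copy_from_user(), has
 * an exception table entry pairing its address with a fixup address.
 * Roughly:
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup)
 *		regs->ARM_pc = fixup->fixup;	// resume at the fixup stub
 *	return fixup != NULL;
 *
 * The fixup stub typically cleans up and makes the faulting helper
 * return -EFAULT instead of oopsing the kernel.
 */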
/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SEGV) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000
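/*
 * These two codes are private to this file (a descriptive note, not
 * original text): they sit above the generic VM_FAULT_* bits from
 * <linux/mm.h>, so __do_page_fault() can return them in the same int as
 * the codes coming back from handle_mm_fault() without any collision.
 */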
/*
 * Check that the permissions on the VMA allow for the fault which
 * occurred.  If we encountered a write fault, we must have write
 * permission, otherwise we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}
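/*
 * Summary of the check above (descriptive comment, not original text):
 *
 *	fault type			vm_flags that permit it
 *	read/unknown			any of VM_READ, VM_WRITE, VM_EXEC
 *	write (FSR_WRITE set)		VM_WRITE only
 *	prefetch abort (FSR_LNX_PF)	VM_EXEC only
 *
 * FSR_LNX_PF is a Linux-private bit: do_PrefetchAbort() below or's it
 * into the status word and do_DataAbort() masks it out, so it never
 * reflects a real hardware FSR bit.
 */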
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the fault.
	 */
	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
				(fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR))
		return fault;
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	return fault;

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
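/*
 * Return values of __do_page_fault() (descriptive comment, not original
 * text): VM_FAULT_BADMAP when no VMA covers the address and the stack
 * cannot be grown to reach it, VM_FAULT_BADACCESS when a VMA exists but
 * its permissions forbid this access, otherwise whatever
 * handle_mm_fault() returned, including the VM_FAULT_ERROR group
 * (VM_FAULT_OOM, VM_FAULT_SIGBUS, ...) and VM_FAULT_MAJOR.
 */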
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (fault & VM_FAULT_MAJOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
	/*
	 * VM_FAULT_MINOR is defined as 0, so "fault & VM_FAULT_MINOR"
	 * could never be true; count a completed fault that was not
	 * major as minor instead.
	 */
	else if (!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
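/*
 * Signal selection above in one line (descriptive comment, not original
 * text): VM_FAULT_SIGBUS becomes SIGBUS/BUS_ADRERR (memory was mapped
 * but the fault could not be fixed up), VM_FAULT_BADACCESS becomes
 * SIGSEGV/SEGV_ACCERR (mapping exists, permission denied), and anything
 * else becomes SIGSEGV/SEGV_MAPERR (no mapping at all).
 */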
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see
	 * page tables layout in pgtable.h).  We normally guarantee that we
	 * always fill both L1 entries.  But create_mapping() doesn't follow
	 * the rule.  It can create individual L1 entries, so here we have
	 * to call pmd_none() on the entry that really corresponds to the
	 * address, not on the first of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
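/*
 * Why "index" matters in the 2-level case (descriptive sketch, not
 * original text): SECTION_SHIFT is 20, so each hardware L1 entry maps
 * 1MB while a Linux pgd entry spans 2MB and carries a pair of them:
 *
 *	Linux pgd entry (8 bytes)
 *	+-------------------+-------------------+
 *	| hw L1 entry for   | hw L1 entry for   |
 *	| addr bit 20 == 0  | addr bit 20 == 1  |
 *	+-------------------+-------------------+
 *
 * (addr >> SECTION_SHIFT) & 1 therefore selects the half of the pair
 * that actually covers the faulting address before the pmd_none() test.
 */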
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */
/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}
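/*
 * Usage sketch (illustrative; the handler and fault number below follow
 * the pattern used by the alignment trap code, and are not something
 * this file sets up): a subsystem claims a fault status value at boot,
 * e.g.
 *
 *	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
 *			"alignment exception");
 *
 * after which do_DataAbort() below routes every data abort whose
 * decoded status is 1 to that handler.
 */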
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}
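/*
 * How the table index is formed (a summary based on fault.h, not text
 * from this file): for the short-descriptor format fsr_fs() combines
 * FSR bits [3:0] with bit [10] shifted down to bit 4, giving up to 32
 * entries; with LPAE it is simply the 6-bit STATUS field, giving 64.
 */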
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"page access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif