	pte_unmap(page_table);
+	/* File mapping without ->vm_ops ? */
+	if (vma->vm_flags & VM_SHARED)
+		return VM_FAULT_SIGBUS;
+
	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGSEGV;
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
+	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
-			if (vma->vm_ops) {
-				if (likely(vma->vm_ops->fault))
-					return do_linear_fault(mm, vma, address,
+			if (vma->vm_ops)
+				return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
-			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

-	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
			vma = find_vma(mm, addr);
			if (!vma || vma->vm_start > addr)
				break;
-			if (vma->vm_ops && vma->vm_ops->access)
+			if ((vma->vm_flags & VM_PFNMAP) &&
+			    !(vma->vm_flags & VM_IO))
+				ret = generic_access_phys(vma, addr, buf,
+							  len, write);
+			if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)