X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmemory.c;h=79c71de0da3c1562c2d93251d300880aa642b281;hb=935299e1f769a91948a9f66be654024d6b1f204d;hp=628cadca97efa7673ec6ddb3b3cff46d6dc19a3b;hpb=d528656f0d33ff97a4aea48f2f1b66535fdfa880;p=pandora-kernel.git

diff --git a/mm/memory.c b/mm/memory.c
index 628cadca97ef..79c71de0da3c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1403,6 +1403,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(&tlb, address, end);
 	return end;
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -1767,7 +1768,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 					else
 						return -EFAULT;
 				}
-				if (ret & VM_FAULT_SIGBUS)
+				if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 					return i ? i : -EFAULT;
 				BUG();
 			}
@@ -1871,7 +1872,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			return -ENOMEM;
 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 			return -EHWPOISON;
-		if (ret & VM_FAULT_SIGBUS)
+		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 			return -EFAULT;
 		BUG();
 	}
@@ -2661,17 +2662,24 @@ reuse:
 	if (!dirty_page)
 		return ret;
 
-	/*
-	 * Yes, Virginia, this is actually required to prevent a race
-	 * with clear_page_dirty_for_io() from clearing the page dirty
-	 * bit after it clear all dirty ptes, but before a racing
-	 * do_wp_page installs a dirty pte.
-	 *
-	 * __do_fault is protected similarly.
-	 */
 	if (!page_mkwrite) {
-		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page, page_mkwrite);
+		struct address_space *mapping;
+		int dirtied;
+
+		lock_page(dirty_page);
+		dirtied = set_page_dirty(dirty_page);
+		VM_BUG_ON(PageAnon(dirty_page));
+		mapping = dirty_page->mapping;
+		unlock_page(dirty_page);
+
+		if (dirtied && mapping) {
+			/*
+			 * Some device drivers do not set page.mapping
+			 * but still dirty their pages
+			 */
+			balance_dirty_pages_ratelimited(mapping);
+		}
+
 	}
 	put_page(dirty_page);
 	if (page_mkwrite) {
@@ -3117,7 +3125,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 		if (prev && prev->vm_end == address)
 			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-		expand_downwards(vma, address - PAGE_SIZE);
+		return expand_downwards(vma, address - PAGE_SIZE);
 	}
 	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
 		struct vm_area_struct *next = vma->vm_next;
@@ -3126,7 +3134,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 		if (next && next->vm_start == address + PAGE_SIZE)
 			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-		expand_upwards(vma, address + PAGE_SIZE);
+		return expand_upwards(vma, address + PAGE_SIZE);
 	}
 	return 0;
 }
@@ -3148,7 +3156,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Check if we need to add a guard page to the stack */
 	if (check_stack_guard_page(vma, address) < 0)
-		return VM_FAULT_SIGBUS;
+		return VM_FAULT_SIGSEGV;
 
 	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
@@ -3546,8 +3554,9 @@ retry:
 
 		barrier();
 		if (pmd_trans_huge(orig_pmd)) {
-			if (flags & FAULT_FLAG_WRITE &&
-			    !pmd_write(orig_pmd) &&
+			unsigned int dirty = flags & FAULT_FLAG_WRITE;
+
+			if (dirty && !pmd_write(orig_pmd) &&
 			    !pmd_trans_splitting(orig_pmd)) {
 				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
 							  orig_pmd);
@@ -3559,6 +3568,9 @@ retry:
 				if (unlikely(ret & VM_FAULT_OOM))
 					goto retry;
 				return ret;
+			} else {
+				huge_pmd_set_accessed(mm, vma, address, pmd,
+						      orig_pmd, dirty);
 			}
 			return 0;
 		}
@@ -3824,7 +3836,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
-	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
 	if (write)
 		memcpy_toio(maddr + offset, buf, len);
 	else
@@ -3863,7 +3875,11 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		vma = find_vma(mm, addr);
 		if (!vma || vma->vm_start > addr)
 			break;
-		if (vma->vm_ops && vma->vm_ops->access)
+		if ((vma->vm_flags & VM_PFNMAP) &&
+		    !(vma->vm_flags & VM_IO))
+			ret = generic_access_phys(vma, addr, buf,
+						  len, write);
+		if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
 			ret = vma->vm_ops->access(vma, addr, buf,
 						  len, write);
 		if (ret <= 0)