mm: actually remap enough memory
diff --git a/mm/memory.c b/mm/memory.c
index f0b43f6..79c71de 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -870,20 +870,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_write_migration_entry(entry) &&
+                       } else if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both parent
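
This hunk moves swap_duplicate() and the swapoff-mmlist bookkeeping inside the non_swap_entry() check, so a swap-map reference is only taken for genuine swap entries and swap_duplicate() is no longer called on migration entries, which have no swap map behind them. The resulting control flow, reassembled from the hunk above as a sketch (names as in the 3.2-era copy_one_pte()):

        if (likely(!non_swap_entry(entry))) {
                /* real swap entry: take a swap-map reference for the child */
                if (swap_duplicate(entry) < 0)
                        return entry.val;

                /* make sure dst_mm is on swapoff's mmlist */
                if (unlikely(list_empty(&dst_mm->mmlist))) {
                        spin_lock(&mmlist_lock);
                        if (list_empty(&dst_mm->mmlist))
                                list_add(&dst_mm->mmlist, &src_mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                rss[MM_SWAPENTS]++;
        } else if (is_write_migration_entry(entry) &&
                        is_cow_mapping(vm_flags)) {
                /* existing migration-entry handling continues unchanged */
        }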
@@ -1178,8 +1178,10 @@ again:
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
                        force_flush = !__tlb_remove_page(tlb, page);
-                       if (force_flush)
+                       if (force_flush) {
+                               addr += PAGE_SIZE;
                                break;
+                       }
                        continue;
                }
                /*
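
Context for this change: the loop is driven by a do-while that only advances addr in its loop condition, which a break skips. __tlb_remove_page() returning zero means the mmu_gather batch is full, so the loop bails out to flush and free what has been batched; without the explicit addr += PAGE_SIZE, the range handed to that flush stops just short of the pte cleared on this iteration, and a stale TLB entry can outlive the freed page. The post-patch tail of the loop body:

        force_flush = !__tlb_remove_page(tlb, page);
        if (force_flush) {
                /* batch full: make the flush range cover this page too */
                addr += PAGE_SIZE;
                break;
        }
        continue;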
@@ -1766,7 +1768,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                                else
                                                        return -EFAULT;
                                        }
-                                       if (ret & VM_FAULT_SIGBUS)
+                                       if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
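
VM_FAULT_SIGSEGV is a new VM_FAULT_ERROR bit here (from the upstream "vm: add VM_FAULT_SIGSEGV handling support" change), and every site that decodes the error bits has to learn it or fall through to BUG(). In __get_user_pages() it is handled exactly like VM_FAULT_SIGBUS:

        if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return i ? i : -EFAULT; /* report pages already pinned, else fault */
        BUG();  /* any remaining error bit is a kernel bug */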
@@ -1853,19 +1855,24 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags)
 {
        struct vm_area_struct *vma;
+       vm_flags_t vm_flags;
        int ret;
 
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;
 
+       vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+       if (!(vm_flags & vma->vm_flags))
+               return -EFAULT;
+
        ret = handle_mm_fault(mm, vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
                if (ret & VM_FAULT_OOM)
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
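
fixup_user_fault() is used by callers such as the futex code that bypass the architecture page-fault handler, and with it that handler's access checks: previously a FAULT_FLAG_WRITE fault could be pushed into handle_mm_fault() against a VMA that does not permit writing at all. The new test derives the required permission from the fault flags up front:

        /* write faults need VM_WRITE; everything else at least VM_READ */
        vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
        if (!(vm_flags & vma->vm_flags))
                return -EFAULT;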
@@ -2655,17 +2662,24 @@ reuse:
                if (!dirty_page)
                        return ret;
 
-               /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * __do_fault is protected similarly.
-                */
                if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page, page_mkwrite);
+                       struct address_space *mapping;
+                       int dirtied;
+
+                       lock_page(dirty_page);
+                       dirtied = set_page_dirty(dirty_page);
+                       VM_BUG_ON(PageAnon(dirty_page));
+                       mapping = dirty_page->mapping;
+                       unlock_page(dirty_page);
+
+                       if (dirtied && mapping) {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+
                }
                put_page(dirty_page);
                if (page_mkwrite) {
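
The deleted comment described why the old code waited on the page lock, but the dirtying itself still happened unlocked, racing with truncation paths that clear dirty state under the page lock. The replacement takes the page lock around set_page_dirty(), samples page->mapping while it is still stable, and only throttles afterwards. Annotated:

        lock_page(dirty_page);
        dirtied = set_page_dirty(dirty_page);   /* dirty under the page lock */
        VM_BUG_ON(PageAnon(dirty_page));        /* only shared file pages get here */
        mapping = dirty_page->mapping;          /* sample before truncation can clear it */
        unlock_page(dirty_page);

        if (dirtied && mapping)
                /* some drivers dirty pages without setting page->mapping */
                balance_dirty_pages_ratelimited(mapping);

This appears to be a backport of the upstream "mm: protect set_page_dirty() from ongoing truncation" fix, with the older VM_BUG_ON() in place of VM_BUG_ON_PAGE().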
@@ -3111,7 +3125,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -3120,7 +3134,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
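
Both hunks fix the same omission: check_stack_guard_page() called the expand routines but discarded their return value, so a failed stack expansion (stack rlimit reached, or -ENOMEM) was reported to the caller as success. With the return propagated, the function reduces to:

        if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
                ...                     /* guard-gap check against the previous vma */
                return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                ...                     /* guard-gap check against the next vma */
                return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;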
@@ -3142,7 +3156,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_SIGSEGV;
 
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
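
With the expansion failure now visible, do_anonymous_page() reports it as VM_FAULT_SIGSEGV rather than VM_FAULT_SIGBUS, so overrunning the stack delivers the conventional SIGSEGV (the guard-page error path had long produced SIGBUS instead). A minimal userspace illustration of the affected path (a hypothetical test, not part of the patch):

        /* stackdemo.c — build with: gcc -O0 stackdemo.c
         * Recurses until stack expansion fails; with this series the kernel
         * delivers the conventional SIGSEGV where the guard-page error path
         * previously surfaced as SIGBUS. */
        static void chew(void)
        {
                volatile char pad[8192];        /* force real stack growth */
                pad[0] = pad[8191] = 1;         /* touch the new pages */
                chew();
        }

        int main(void)
        {
                chew();                         /* never returns */
                return 0;
        }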
@@ -3822,7 +3836,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
        if (follow_phys(vma, addr, write, &prot, &phys_addr))
                return -EINVAL;
 
-       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
@@ -3861,7 +3875,11 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                        vma = find_vma(mm, addr);
                        if (!vma || vma->vm_start > addr)
                                break;
-                       if (vma->vm_ops && vma->vm_ops->access)
+                       if ((vma->vm_flags & VM_PFNMAP) &&
+                           !(vma->vm_flags & VM_IO))
+                               ret = generic_access_phys(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
                                                          len, write);
                        if (ret <= 0)
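
This branch only runs after get_user_pages() has failed for the address (ret <= 0 here), which is exactly what happens on VM_PFNMAP areas with no struct page behind them. Mainline normally reaches generic_access_phys() through a driver's vma->vm_ops->access hook; this hunk also tries it directly for plain PFN remaps not marked VM_IO, and falls back to the ->access hook only when that yields nothing. Resulting flow:

        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                break;
        /* raw PFN remap without struct pages: try physical access directly */
        if ((vma->vm_flags & VM_PFNMAP) && !(vma->vm_flags & VM_IO))
                ret = generic_access_phys(vma, addr, buf, len, write);
        /* otherwise, or if that failed, defer to the driver's ->access hook */
        if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
                ret = vma->vm_ops->access(vma, addr, buf, len, write);
        if (ret <= 0)
                break;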