UBI: fix atomic LEB change problems
[pandora-kernel.git] / mm/memory.c
index ca8cac1..f82b359 100644
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *old_page, *new_page;
        pte_t entry;
        int reuse = 0, ret = 0;
+       int page_mkwrite = 0;
        struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_release(old_page);
                        if (!pte_same(*page_table, orig_pte))
                                goto unlock;
+
+                       page_mkwrite = 1;
                }
                dirty_page = old_page;
                get_page(dirty_page);
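The new page_mkwrite flag is only raised on the shared-writable branch, after the VMA's ->page_mkwrite handler has run and returned success. For illustration only, a hypothetical filesystem handler of this era, with the int (*page_mkwrite)(struct vm_area_struct *, struct page *) signature, might look like the sketch below; the name and the block-reservation step are assumptions, not part of this diff.

static int example_page_mkwrite(struct vm_area_struct *vma,
				struct page *page)
{
	lock_page(page);
	/* Bail out if the page was truncated while unlocked. */
	if (page->mapping != vma->vm_file->f_mapping) {
		unlock_page(page);
		return -EINVAL;
	}
	/* A real filesystem would reserve backing blocks here. */
	unlock_page(page);
	return 0;	/* success: do_wp_page() sets page_mkwrite = 1 */
}
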
@@ -1774,7 +1777,7 @@ unlock:
                 * do_no_page is protected similarly.
                 */
                wait_on_page_locked(dirty_page);
-               set_page_dirty_balance(dirty_page);
+               set_page_dirty_balance(dirty_page, page_mkwrite);
                put_page(dirty_page);
        }
        return ret;
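The other half of this change lives in mm/page-writeback.c and is not shown in this file's diff. A minimal sketch of how set_page_dirty_balance() presumably consumes the new argument: once ->page_mkwrite has run, the page went through a clean-to-dirty transition even if set_page_dirty() now returns 0 because the page is already dirty, so dirty balancing must still be performed.

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}
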
@@ -2307,13 +2310,14 @@ oom:
  * do not need to flush old virtual caches or the TLB.
  *
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * but allow concurrent faults), and pte neither mapped nor locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, pte_t *page_table, pmd_t *pmd,
+               unsigned long address, pmd_t *pmd,
                pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
+       pte_t *page_table;
        spinlock_t *ptl;
        struct page *page;
        pte_t entry;
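With page_table demoted to a local, __do_fault() no longer receives a mapped pte from its caller; it maps and locks the pte itself, later in the function, once the faulted page is ready. A condensed sketch of the map-and-lock idiom it uses there:

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		/* ... build the new pte and install it ... */
	}
	pte_unmap_unlock(page_table, ptl);
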
@@ -2321,13 +2325,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *dirty_page = NULL;
        struct vm_fault vmf;
        int ret;
+       int page_mkwrite = 0;
 
        vmf.virtual_address = (void __user *)(address & PAGE_MASK);
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.page = NULL;
 
-       pte_unmap(page_table);
        BUG_ON(vma->vm_flags & VM_PFNMAP);
 
        if (likely(vma->vm_ops->fault)) {
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                        anon = 1; /* no anon but release vmf.page */
                                        goto out;
                                }
+                               page_mkwrite = 1;
                        }
                }
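This mirrors the do_wp_page() hunk above: the flag is raised only after vma->vm_ops->page_mkwrite() returned success for a write fault on a shared mapping. Condensed and lightly paraphrased, the control flow around this hunk is roughly:

	if (vma->vm_ops->page_mkwrite) {
		unlock_page(page);
		if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
			ret = VM_FAULT_SIGBUS;
			anon = 1;	/* no anon but release vmf.page */
			goto out;
		}
		page_mkwrite = 1;	/* consumed by set_page_dirty_balance() */
		lock_page(page);
	}
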
 
@@ -2453,7 +2458,7 @@ out_unlocked:
        if (anon)
                page_cache_release(vmf.page);
        else if (dirty_page) {
-               set_page_dirty_balance(dirty_page);
+               set_page_dirty_balance(dirty_page, page_mkwrite);
                put_page(dirty_page);
        }
 
@@ -2468,8 +2473,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
        unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
-       return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-                                                       flags, orig_pte);
+       pte_unmap(page_table);
+       return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
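Moving pte_unmap() out of __do_fault() and into do_linear_fault() fixes a double unmap: do_nonlinear_fault() reaches __do_fault() via pte_unmap_same(), which already unmaps the pte, so the old unconditional pte_unmap() inside __do_fault() unmapped it a second time. With CONFIG_HIGHPTE, pte_offset_map() and pte_unmap() are kmap_atomic()/kunmap_atomic() pairs, and the unbalanced kunmap_atomic() trips the BUG in the arch highmem code. The caller contract after this patch, sketched (not verbatim kernel code):

	pte_t *page_table = pte_offset_map(pmd, address); /* kmap_atomic with HIGHPTE */
	pte_t orig_pte = *page_table;
	/* ... decide this is a linear file fault ... */
	pte_unmap(page_table);		/* exactly once, before __do_fault() */
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
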
 
 
@@ -2552,9 +2557,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        pgoff = pte_to_pgoff(orig_pte);
-
-       return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-                                                       flags, orig_pte);
+       return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
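No pte_unmap() needs to be added on this path: do_nonlinear_fault() has already passed through pte_unmap_same(), which unmaps the pte as a side effect. For reference, that helper from earlier in mm/memory.c reads approximately as follows; the exact #if condition is recalled, not taken from this diff.

static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}
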
 
 /*