diff --git a/mm/memory.c b/mm/memory.c
index 9aefaae..2302d22 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1045,6 +1045,26 @@ no_page_table:
        return page;
 }
 
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+       /*
+        * We don't want to optimize FOLL_ANON for make_pages_present()
+        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+        * we want to get the page from the page tables to make sure
+        * that we serialize and update with any other user of that
+        * mapping.
+        */
+       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+               return 0;
+       /*
+        * And if we have a fault or a nopfn routine, it's not an
+        * anonymous region.
+        */
+       return !vma->vm_ops ||
+               (!vma->vm_ops->fault && !vma->vm_ops->nopfn);
+}
+
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
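
For illustration only, here is a minimal userspace sketch of the same predicate. The types and flag values (mock_vma, MOCK_VM_LOCKED, MOCK_VM_SHARED, the ops struct) are simplified stand-ins, not the kernel definitions; the point is just the two conditions use_zero_page() combines: the vma must be neither locked nor shared, and it must have no fault/nopfn method, i.e. it must be a plain anonymous mapping.

/* Userspace sketch, NOT kernel code: mock types standing in for
 * vm_area_struct and vm_operations_struct to show the decision. */
#include <stdio.h>

#define MOCK_VM_LOCKED  0x1
#define MOCK_VM_SHARED  0x2

struct mock_vm_ops {
        int (*fault)(void);     /* non-NULL => backed by a file/driver */
        int (*nopfn)(void);
};

struct mock_vma {
        unsigned long vm_flags;
        const struct mock_vm_ops *vm_ops;
};

/* Same shape as use_zero_page(): not LOCKED/SHARED, and no fault/nopfn. */
static int mock_use_zero_page(const struct mock_vma *vma)
{
        if (vma->vm_flags & (MOCK_VM_LOCKED | MOCK_VM_SHARED))
                return 0;
        return !vma->vm_ops ||
                (!vma->vm_ops->fault && !vma->vm_ops->nopfn);
}

int main(void)
{
        struct mock_vma anon_private = { 0, NULL };
        struct mock_vma locked_anon  = { MOCK_VM_LOCKED, NULL };

        printf("anon private: %d\n", mock_use_zero_page(&anon_private)); /* 1 */
        printf("locked anon : %d\n", mock_use_zero_page(&locked_anon));  /* 0 */
        return 0;
}
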
@@ -1119,8 +1139,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                foll_flags = FOLL_TOUCH;
                if (pages)
                        foll_flags |= FOLL_GET;
-               if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || !vma->vm_ops->fault))
+               if (!write && use_zero_page(vma))
                        foll_flags |= FOLL_ANON;
 
                do {
@@ -1132,7 +1151,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         * be processed until returning to user space.
                         */
                        if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return -ENOMEM;
+                               return i ? i : -ENOMEM;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
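
The -ENOMEM change preserves work already done: if some pages were pinned before the OOM-kill flag is noticed, the caller now gets that count back instead of an error that discards the progress. A standalone sketch of that "return i ? i : -ENOMEM" pattern follows; pin_some_pages() and fatal_condition() are made-up names for illustration, not a kernel API.

/* Userspace sketch, not kernel code: a loop that reports partial
 * progress instead of discarding it when a fatal condition appears. */
#include <errno.h>
#include <stdio.h>

/* Pretend the fatal flag (the TIF_MEMDIE stand-in) fires on the 3rd page. */
static int fatal_condition(int iteration)
{
        return iteration >= 2;
}

/* Mirrors "return i ? i : -ENOMEM": report pages already handled if
 * there are any, otherwise return the error. */
static int pin_some_pages(int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                if (fatal_condition(i))
                        return i ? i : -ENOMEM;
                /* ... pin page i here ... */
        }
        return i;
}

int main(void)
{
        int ret = pin_some_pages(8);

        if (ret < 0)
                printf("no progress, error %d\n", ret);
        else
                printf("pinned %d of 8 pages before stopping\n", ret);
        return 0;
}
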
@@ -1678,8 +1697,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
-       if (!old_page)
+       if (!old_page) {
+               /*
+                * VM_MIXEDMAP !pfn_valid() case
+                *
+                * We should not cow pages in a shared writeable mapping.
+                * Just mark the pages writable as we can't do any dirty
+                * accounting on raw pfn maps.
+                */
+               if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                    (VM_WRITE|VM_SHARED))
+                       goto reuse;
                goto gotten;
+       }
 
        /*
         * Take out anonymous pages first, anonymous shared vmas are
@@ -1732,6 +1762,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        if (reuse) {
+reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
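
The two hunks above form the new reuse path: a shared writable pfn/MIXEDMAP mapping jumps straight to the reuse: label and is simply marked writable. Note that the check requires both bits: comparing the masked value against the full mask means "all of these flags are set", whereas testing the mask alone would pass if either one were set. A small standalone illustration (mock flag values, not the kernel's) of that bit-test idiom:

/* Userspace sketch: "(flags & MASK) == MASK" is true only when ALL bits
 * in MASK are set; "flags & MASK" alone is true if ANY bit is set. */
#include <stdio.h>

#define MOCK_VM_WRITE   0x1
#define MOCK_VM_SHARED  0x2

static int shared_writable(unsigned long flags)
{
        return (flags & (MOCK_VM_WRITE | MOCK_VM_SHARED)) ==
                        (MOCK_VM_WRITE | MOCK_VM_SHARED);
}

int main(void)
{
        /* Only the shared+writable mapping may take the "reuse" path. */
        printf("write only : %d\n", shared_writable(MOCK_VM_WRITE));   /* 0 */
        printf("shared only: %d\n", shared_writable(MOCK_VM_SHARED));  /* 0 */
        printf("both       : %d\n",
               shared_writable(MOCK_VM_WRITE | MOCK_VM_SHARED));       /* 1 */
        return 0;
}
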
@@ -1766,7 +1797,6 @@ gotten:
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
-                       page_remove_rmap(old_page, vma);
                        if (!PageAnon(old_page)) {
                                dec_mm_counter(mm, file_rss);
                                inc_mm_counter(mm, anon_rss);
@@ -1788,6 +1818,32 @@ gotten:
                lru_cache_add_active(new_page);
                page_add_new_anon_rmap(new_page, vma, address);
 
+               if (old_page) {
+                       /*
+                        * Only after switching the pte to the new page may
+                        * we remove the mapcount here. Otherwise another
+                        * process may come and find the rmap count decremented
+                        * before the pte is switched to the new page, and
+                        * "reuse" the old page writing into it while our pte
+                        * here still points into it and can be read by other
+                        * threads.
+                        *
+                        * The critical issue is to order this
+                        * page_remove_rmap with the ptep_clear_flush above.
+                        * Those stores are ordered by (if nothing else)
+                        * the barrier present in the atomic_add_negative
+                        * in page_remove_rmap.
+                        *
+                        * Then the TLB flush in ptep_clear_flush ensures that
+                        * no process can access the old page before the
+                        * decremented mapcount is visible. And the old page
+                        * cannot be reused until after the decremented
+                        * mapcount is visible. So transitively, TLBs to
+                        * old page will be flushed before it can be reused.
+                        */
+                       page_remove_rmap(old_page, vma);
+               }
+
                /* Free the old page.. */
                new_page = old_page;
                ret |= VM_FAULT_WRITE;
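
The comment's argument boils down to "publish the replacement before dropping the last reference". As a loose userspace analogy only, using C11 atomics in place of the barrier in atomic_add_negative() and ignoring the TLB-flush half of the argument, the ordering requirement pairs a release decrement with an acquire read of the count. All names below are stand-ins, not kernel structures.

/* Loose userspace analogy, not kernel code: the pointer switch must be
 * ordered before the mapcount decrement, so whoever sees the count hit
 * zero also sees that nothing points at the old object any more. */
#include <stdatomic.h>
#include <stdio.h>

struct page_like { int data; };

static struct page_like old_pg = { .data = 1 };
static struct page_like new_pg = { .data = 2 };

static _Atomic(struct page_like *) mapping = &old_pg; /* stands in for the pte */
static atomic_int mapcount = 1;                       /* stands in for _mapcount */

/* do_wp_page() side: install the new page, then drop the old mapping. */
static void switch_and_unmap(void)
{
        atomic_store_explicit(&mapping, &new_pg, memory_order_relaxed);
        /* Release ordering plays the role of the barrier in
         * atomic_add_negative(): the store to "mapping" above cannot be
         * reordered after this decrement. */
        atomic_fetch_sub_explicit(&mapcount, 1, memory_order_release);
}

/* Reuse side: only recycle the old page once no mapping remains. */
static void maybe_reuse_old_page(void)
{
        if (atomic_load_explicit(&mapcount, memory_order_acquire) == 0) {
                /* The acquire load pairing with the release decrement
                 * guarantees this thread also sees mapping == &new_pg,
                 * so scribbling on old_pg cannot corrupt data that is
                 * still reachable through "mapping". */
                old_pg.data = 99;
        }
}

int main(void)
{
        switch_and_unmap();
        maybe_reuse_old_page();
        printf("mapping->data = %d\n",
               atomic_load_explicit(&mapping, memory_order_relaxed)->data);
        return 0;
}
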