mlock: revert mainline handling of mlock error return
diff --git a/mm/memory.c b/mm/memory.c
index 0e4eea1..b8487f8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -64,6 +64,8 @@
 
 #include "internal.h"
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
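
Note: the include added above pulls in the mm-internal helpers this patch relies on later (clear_page_mlock() and the GUP_FLAGS_* bits). As a hedged sketch, the internal.h side of the unevictable-LRU series looks roughly like this; the exact config guard and event accounting may differ in this tree:

	#ifdef CONFIG_UNEVICTABLE_LRU
	/*
	 * Sketch: clear PageMlocked on a page about to lose its mlocked
	 * mapping (COW, truncation), so a stale flag cannot keep it on the
	 * unevictable list.  The real helper also fixes up LRU placement.
	 */
	static inline void clear_page_mlock(struct page *page)
	{
		if (unlikely(TestClearPageMlocked(page)))
			dec_zone_page_state(page, NR_MLOCK);
	}
	#else
	static inline void clear_page_mlock(struct page *page) { }
	#endif
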
@@ -993,7 +995,6 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                tlb_finish_mmu(tlb, address, end);
        return end;
 }
-EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -1111,7 +1112,6 @@ no_page_table:
        }
        return page;
 }
-EXPORT_SYMBOL_GPL(follow_page);
 
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
@@ -1131,12 +1131,17 @@ static inline int use_zero_page(struct vm_area_struct *vma)
        return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long start, int len, int flags,
                struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags;
+       unsigned int vm_flags = 0;
+       int write = !!(flags & GUP_FLAGS_WRITE);
+       int force = !!(flags & GUP_FLAGS_FORCE);
+       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
        if (len <= 0)
                return 0;
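
Note: the write/force/ignore bits decoded at the top of __get_user_pages() are assumed to be defined next to its declaration in mm/internal.h, along these lines (the names appear in this patch; the values are the ones used by the unevictable-LRU series and are not confirmed by this hunk):

	#define GUP_FLAGS_WRITE			 0x1
	#define GUP_FLAGS_FORCE			 0x2
	#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
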
@@ -1160,7 +1165,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
-                       if (write) /* user gate pages are read-only */
+
+                       /* user gate pages are read-only */
+                       if (!ignore && write)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1192,8 +1199,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
-               if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                               || !(vm_flags & vma->vm_flags))
+               if (!vma ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   (!ignore && !(vm_flags & vma->vm_flags)))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
@@ -1268,6 +1276,23 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        } while (len);
        return i;
 }
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, int len, int write, int force,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       int flags = 0;
+
+       if (write)
+               flags |= GUP_FLAGS_WRITE;
+       if (force)
+               flags |= GUP_FLAGS_FORCE;
+
+       return __get_user_pages(tsk, mm,
+                               start, len, flags,
+                               pages, vmas);
+}
+
 EXPORT_SYMBOL(get_user_pages);
 
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
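
Note: the point of the split is that get_user_pages() keeps its historical permission checks, while a new internal caller can opt out of them. A hedged sketch of how the mlock side of the series is expected to use it (the function name here is hypothetical; the real caller is __mlock_vma_pages_range() in mm/mlock.c):

	/*
	 * Fault in and mlock the pages of a VM_LOCKED vma, even one that is
	 * PROT_NONE: the IGNORE flag skips the vm_flags permission test that
	 * would make plain get_user_pages() return -EFAULT here.
	 */
	static long mlock_fault_range(struct vm_area_struct *vma,	/* hypothetical */
				      unsigned long start, unsigned long end)
	{
		int nr_pages = (end - start) >> PAGE_SHIFT;
		int flags = GUP_FLAGS_IGNORE_VMA_PERMISSIONS;

		if (vma->vm_flags & VM_WRITE)
			flags |= GUP_FLAGS_WRITE;

		/* pages/vmas may be NULL: we only want the fault side effects */
		return __get_user_pages(current, vma->vm_mm, start, nr_pages,
					flags, NULL, NULL);
	}
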
@@ -1791,7 +1816,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (!TestSetPageLocked(old_page)) {
+               if (trylock_page(old_page)) {
                        reuse = can_share_swap_page(old_page);
                        unlock_page(old_page);
                }
@@ -1860,6 +1885,15 @@ gotten:
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (!new_page)
                goto oom;
+       /*
+        * Don't let another task, with possibly unlocked vma,
+        * keep the mlocked page.
+        */
+       if (vma->vm_flags & VM_LOCKED) {
+               lock_page(old_page);    /* for LRU manipulation */
+               clear_page_mlock(old_page);
+               unlock_page(old_page);
+       }
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
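
Note: the page lock taken around clear_page_mlock() above is what keeps the Mlocked flag and the page's LRU placement changing together, so munlock/reclaim cannot race the COW. The locking rule on the mlock side is the mirror image; a sketch, with accounting details unverified:

	/*
	 * Sketch of the counterpart: mark a page mlocked.  Caller holds the
	 * page lock so the flag and NR_MLOCK stay coherent with any
	 * concurrent clear_page_mlock().
	 */
	void mlock_vma_page(struct page *page)
	{
		BUG_ON(!PageLocked(page));
		if (!TestSetPageMlocked(page))
			inc_zone_page_state(page, NR_MLOCK);
	}
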
@@ -1888,11 +1922,13 @@ gotten:
                 * thread doing COW.
                 */
                ptep_clear_flush_notify(vma, address, page_table);
-               set_pte_at(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
-               lru_cache_add_active(new_page);
+               SetPageSwapBacked(new_page);
+               lru_cache_add_active_or_unevictable(new_page, vma);
                page_add_new_anon_rmap(new_page, vma, address);
 
+       /* TODO: is this safe?  do_anonymous_page() does it this way. */
+               set_pte_at(mm, address, page_table, entry);
+               update_mmu_cache(vma, address, entry);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
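
Note: lru_cache_add_active_or_unevictable() replaces the unconditional lru_cache_add_active(): a page mapped into a VM_LOCKED vma goes straight to the unevictable list instead of the active LRU, where the scanner would only churn on it. A minimal sketch of the helper, assuming the mm/swap.c implementation from the same series:

	void lru_cache_add_active_or_unevictable(struct page *page,
						 struct vm_area_struct *vma)
	{
		if (likely(!(vma->vm_flags & VM_LOCKED)))
			lru_cache_add_active(page);
		else
			add_page_to_unevictable_list(page);
	}
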
@@ -2326,7 +2362,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page_add_anon_rmap(page, vma, address);
 
        swap_free(entry);
-       if (vm_swap_full())
+       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                remove_exclusive_swap_page(page);
        unlock_page(page);
 
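
Note: the widened condition in do_swap_page() is about swap-slot economy, not correctness: an mlocked page (or one mapped by a VM_LOCKED vma) is unevictable and can never be swapped back out, so eagerly calling remove_exclusive_swap_page() releases its swap slot instead of leaving it pinned until vm_swap_full() pressure.
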
@@ -2384,7 +2420,8 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
        inc_mm_counter(mm, anon_rss);
-       lru_cache_add_active(page);
+       SetPageSwapBacked(page);
+       lru_cache_add_active_or_unevictable(page, vma);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
@@ -2465,6 +2502,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
+                       /*
+                        * Don't let another task, with possibly unlocked vma,
+                        * keep the mlocked page.
+                        */
+                       if (vma->vm_flags & VM_LOCKED)
+                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
@@ -2522,11 +2565,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
                if (anon) {
-                        inc_mm_counter(mm, anon_rss);
-                        lru_cache_add_active(page);
-                        page_add_new_anon_rmap(page, vma, address);
+                       inc_mm_counter(mm, anon_rss);
+                       SetPageSwapBacked(page);
+                       lru_cache_add_active_or_unevictable(page, vma);
+                       page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(page);
@@ -2535,6 +2578,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
+       /* TODO: is this safe?  do_anonymous_page() does it this way. */
+               set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
@@ -2767,7 +2812,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
 
        vma = find_vma(current->mm, addr);
        if (!vma)
-               return -1;
+               return -ENOMEM;
        write = (vma->vm_flags & VM_WRITE) != 0;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
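
Note: with -ENOMEM instead of -1, callers can propagate the error verbatim; the old -1 happens to equal -EPERM, a misleading errno for "no vma at this address". A hedged caller-side sketch (mlock_fixup()-style, names illustrative):

	/* fault the range in; hand any error straight back to userspace */
	int error = make_pages_present(start, end);
	if (error < 0)
		return error;	/* now -ENOMEM rather than an accidental -EPERM */
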