mm: fix sleeping function warning from __put_anon_vma
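
put_anon_vma() on the final reference ends up in anon_vma_free(), which may take the root anon_vma mutex (visible in the anon_vma_free() hunk below) and can therefore sleep. Calling it while still inside an RCU read-side critical section is not allowed, so page_get_anon_vma() and page_lock_anon_vma() now leave the RCU read-side section before dropping their reference, and anon_vma_free() gains a might_sleep() so that debug builds flag any future caller running in a non-sleepable context. The combined diff shown here also carries further rmap fixes: a NULL check after huge_pte_offset() in __page_check_address(), dirty-bit accounting changes in page_mkclean() and page_remove_rmap(), and taking the page lock before mlock_vma_page() in try_to_unmap_cluster().

A minimal sketch of the calling pattern being enforced (struct thing and both functions are illustrative only, not code from rmap.c):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct thing {
        struct mutex lock;
};

/* May sleep: takes a mutex.  might_sleep() documents and enforces that. */
static void release_thing(struct thing *t)
{
        might_sleep();
        mutex_lock(&t->lock);
        /* ... final teardown ... */
        mutex_unlock(&t->lock);
}

static void check_then_release(struct thing *t)
{
        rcu_read_lock();
        /* ... revalidate t under RCU (elided) ... */
        rcu_read_unlock();      /* leave the RCU read-side section first ... */
        release_thing(t);       /* ... only then call the function that may sleep */
}
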
index 6541cf7..ae27d95 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
 #include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 
@@ -102,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
+       might_sleep();
        if (mutex_is_locked(&anon_vma->root->mutex)) {
                anon_vma_lock(anon_vma);
                anon_vma_unlock(anon_vma);
@@ -433,8 +435,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
         * above cannot corrupt).
         */
        if (!page_mapped(page)) {
+               rcu_read_unlock();
                put_anon_vma(anon_vma);
-               anon_vma = NULL;
+               return NULL;
        }
 out:
        rcu_read_unlock();
@@ -484,9 +487,9 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
        }
 
        if (!page_mapped(page)) {
+               rcu_read_unlock();
                put_anon_vma(anon_vma);
-               anon_vma = NULL;
-               goto out;
+               return NULL;
        }
 
        /* we pinned the anon_vma, it's safe to sleep */
@@ -580,7 +583,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
        spinlock_t *ptl;
 
        if (unlikely(PageHuge(page))) {
+               /* when pud is not present, pte will be NULL */
                pte = huge_pte_offset(mm, address);
+               if (!pte)
+                       return NULL;
+
                ptl = &mm->page_table_lock;
                goto check;
        }
@@ -935,11 +942,8 @@ int page_mkclean(struct page *page)
 
        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
-               if (mapping) {
+               if (mapping)
                        ret = page_mkclean_file(mapping, page);
-                       if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-                               ret = 1;
-               }
        }
 
        return ret;
@@ -1120,6 +1124,8 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+       struct address_space *mapping = page_mapping(page);
+
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
                return;
@@ -1130,8 +1136,19 @@ void page_remove_rmap(struct page *page)
         * this if the page is anon, so about to be freed; but perhaps
         * not if it's in swapcache - there might be another pte slot
         * containing the swap entry, but page not yet written to swap.
+        *
+        * And we can skip it on file pages, so long as the filesystem
+        * participates in dirty tracking; but need to catch shm and tmpfs
+        * and ramfs pages which have been modified since creation by read
+        * fault.
+        *
+        * Note that mapping must be decided above, before decrementing
+        * mapcount (which luckily provides a barrier): once page is unmapped,
+        * it could be truncated and page->mapping reset to NULL at any moment.
+        * Note also that we are relying on page_mapping(page) to set mapping
+        * to &swapper_space when PageSwapCache(page).
         */
-       if ((!PageAnon(page) || PageSwapCache(page)) &&
+       if (mapping && !mapping_cap_account_dirty(mapping) &&
            page_test_and_clear_dirty(page_to_pfn(page), 1))
                set_page_dirty(page);
        /*
@@ -1370,9 +1387,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                BUG_ON(!page || PageAnon(page));
 
                if (locked_vma) {
-                       mlock_vma_page(page);   /* no-op if already mlocked */
-                       if (page == check_page)
+                       if (page == check_page) {
+                               /* we know we have check_page locked */
+                               mlock_vma_page(page);
                                ret = SWAP_MLOCK;
+                       } else if (trylock_page(page)) {
+                               /*
+                                * If we can lock the page, perform mlock.
+                                * Otherwise leave the page alone, it will be
+                                * eventually encountered again later.
+                                */
+                               mlock_vma_page(page);
+                               unlock_page(page);
+                       }
                        continue;       /* don't unmap */
                }