Merge branch 'fbdev-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 83326ad..81532f2 100644
@@ -1139,7 +1139,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->lock to
+                * and it won't wait on the anon_vma->root->mutex to
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush_notify(vma, address, pmd);
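The comment in this hunk leans on the anon_vma root lock for serialization: the pmd stays marked huge, so any concurrent fault keeps taking the pmd_trans_huge paths and ends up waiting on anon_vma->root->mutex until the split has finished. Below is a minimal user-space sketch of that pattern only, with a pthread mutex standing in for the kernel lock; splitter_sketch() and fault_path_sketch() are hypothetical names, not kernel functions.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t anon_vma_root_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool pmd_is_splitting;	/* stands in for the splitting marker on the huge pmd */

static void splitter_sketch(void)
{
	pthread_mutex_lock(&anon_vma_root_mutex);
	pmd_is_splitting = true;	/* mark the pmd but keep it huge */
	/* ... walk every mapping and split the huge page here ... */
	pmd_is_splitting = false;
	pthread_mutex_unlock(&anon_vma_root_mutex);
}

static void fault_path_sketch(void)
{
	if (pmd_is_splitting) {
		/*
		 * Because the pmd is still marked huge, the fault takes the
		 * huge-pmd path and blocks here until the split is done.
		 */
		pthread_mutex_lock(&anon_vma_root_mutex);
		pthread_mutex_unlock(&anon_vma_root_mutex);
	}
	/* ... proceed against a stable pmd ... */
}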
@@ -1333,7 +1333,7 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->lock hold */
+/* must be called with anon_vma->root->mutex held */
 static void __split_huge_page(struct page *page,
                              struct anon_vma *anon_vma)
 {
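The updated comment states a locking precondition rather than behaviour: __split_huge_page() takes no lock of its own and trusts its caller to hold anon_vma->root->mutex. A hedged sketch of that caller-side contract follows; anon_vma_sketch and the *_sketch() functions are invented for illustration and stand in for the kernel's own anon_vma locking helpers.

#include <pthread.h>

struct anon_vma_sketch {
	struct anon_vma_sketch *root;
	pthread_mutex_t mutex;
};

/* Does no locking of its own: it relies on the caller, like __split_huge_page(). */
static void __split_huge_page_sketch(struct anon_vma_sketch *anon_vma)
{
	/* ... unmap, split and remap the compound page here ... */
}

static void split_huge_page_sketch(struct anon_vma_sketch *anon_vma)
{
	pthread_mutex_lock(&anon_vma->root->mutex);	/* lock the root, not the leaf */
	__split_huge_page_sketch(anon_vma);
	pthread_mutex_unlock(&anon_vma->root->mutex);
}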
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+       up_read(&mm->mmap_sem);
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-               up_read(&mm->mmap_sem);
-               return;
-       }
 #else
        VM_BUG_ON(*hpage);
        /*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
         */
        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
                                      node, __GFP_OTHER_NODE);
+
+       /*
+        * After allocating the hugepage, release the mmap_sem read lock in
+        * preparation for taking it in write mode.
+        */
+       up_read(&mm->mmap_sem);
        if (unlikely(!new_page)) {
-               up_read(&mm->mmap_sem);
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
+#endif
+
        count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-               up_read(&mm->mmap_sem);
+#ifdef CONFIG_NUMA
                put_page(new_page);
+#endif
                return;
        }
-#endif
-
-       /* after allocating the hugepage upgrade to mmap_sem write mode */
-       up_read(&mm->mmap_sem);
 
        /*
         * Prevent all access to pagetables with the exception of
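The reshuffled hunk above releases mmap_sem for reading as early as possible: at function entry in the non-NUMA case, and immediately after alloc_hugepage_vma() in the NUMA case. That way mem_cgroup_newpage_charge(), which can block, never runs with mmap_sem held, and the charge plus THP_COLLAPSE_ALLOC accounting is shared by both configurations before the lock is retaken for writing. A rough sketch of the resulting drop-then-retake-for-write pattern, with a pthread rwlock in place of mmap_sem; collapse_sketch(), charge_sketch() and revalidate_sketch() are hypothetical stand-ins for collapse_huge_page()'s real steps.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t mmap_sem_sketch = PTHREAD_RWLOCK_INITIALIZER;

static bool charge_sketch(void)     { return true; }	/* may sleep, so no lock held */
static bool revalidate_sketch(void) { return true; }	/* the vma may have changed meanwhile */

static int collapse_sketch(void)
{
	/*
	 * collapse_huge_page() is really entered with mmap_sem already held
	 * for reading; taking it here just keeps the sketch self-contained.
	 */
	pthread_rwlock_rdlock(&mmap_sem_sketch);

	/*
	 * Drop it before the potentially sleeping charge, as the hunk above
	 * does by moving up_read() ahead of mem_cgroup_newpage_charge().
	 */
	pthread_rwlock_unlock(&mmap_sem_sketch);

	if (!charge_sketch())
		return -1;	/* nothing held, just bail out */

	/* Retake in write mode and revalidate before touching page tables. */
	pthread_rwlock_wrlock(&mmap_sem_sketch);
	if (!revalidate_sketch()) {
		pthread_rwlock_unlock(&mmap_sem_sketch);
		return -1;
	}
	/* ... collapse the range into a huge page here ... */
	pthread_rwlock_unlock(&mmap_sem_sketch);
	return 0;
}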
@@ -2233,11 +2234,8 @@ static void khugepaged_loop(void)
        while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage)) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+               if (unlikely(!hpage))
                        break;
-               }
-               count_vm_event(THP_COLLAPSE_ALLOC);
 #else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();