mm: correct page->pfmemalloc to fix deactivate_slab regression
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec7b86e..bc72712 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
        page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);
 
-       if (!page) {
+       if (!page)
                page = alloc_buddy_huge_page(h, nid);
-               if (page) {
-                       spin_lock(&hugetlb_lock);
-                       list_move(&page->lru, &h->hugepage_activelist);
-                       spin_unlock(&hugetlb_lock);
-               }
-       }
 
        return page;
 }
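
This hunk drops the open-coded move onto hugepage_activelist: in this
series, putting a freshly allocated huge page on the active list is tied
to committing its hugetlb cgroup charge under hugetlb_lock (see the
alloc_huge_page() hunks below). For reference, a sketch of
alloc_huge_page_node() as it reads once the hunk applies, reconstructed
from the context lines above; the initial spin_lock() is assumed from
the surrounding code of this era, and the snippet is not a buildable
unit on its own:

struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);

        if (!page)
                page = alloc_buddy_huge_page(h, nid);

        return page;
}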
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        }
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-       spin_unlock(&hugetlb_lock);
-
-       if (!page) {
+       if (page) {
+               /* update page cgroup details */
+               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+                                            h_cg, page);
+               spin_unlock(&hugetlb_lock);
+       } else {
+               spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
                        hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                        return ERR_PTR(-ENOSPC);
                }
                spin_lock(&hugetlb_lock);
+               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+                                            h_cg, page);
                list_move(&page->lru, &h->hugepage_activelist);
                spin_unlock(&hugetlb_lock);
        }
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        set_page_private(page, (unsigned long)spool);
 
        vma_commit_reservation(h, vma, addr);
-       /* update page cgroup details */
-       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
        return page;
 }
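
Before this change, alloc_huge_page() dropped hugetlb_lock right after
the dequeue and only committed the hugetlb cgroup charge at the very end
of the function, so a page could sit on h->hugepage_activelist with no
cgroup assigned yet. The reordering commits the charge in the same
hugetlb_lock section that makes the page visible on the active list, so
anything walking that list under hugetlb_lock never observes a
half-initialized page. Below is a minimal userspace model of that
invariant, assuming only POSIX threads; the *_model names and the singly
linked list are illustrative stand-ins, not kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page_model {                     /* stands in for struct page */
        struct page_model *next;        /* models the lru list linkage */
        void *hugetlb_cgroup;           /* models the page's cgroup slot */
};

static pthread_mutex_t hugetlb_lock_model = PTHREAD_MUTEX_INITIALIZER;
static struct page_model *activelist_model; /* h->hugepage_activelist */

/* the pattern after this patch: commit the charge and put the page on
 * the active list inside one critical section */
static void commit_and_activate(struct page_model *page, void *cg)
{
        pthread_mutex_lock(&hugetlb_lock_model);
        page->hugetlb_cgroup = cg;      /* hugetlb_cgroup_commit_charge() */
        page->next = activelist_model;  /* list_move(..., activelist) */
        activelist_model = page;
        pthread_mutex_unlock(&hugetlb_lock_model);
}

/* models a walker of the active list, which relies on every active
 * page having its cgroup assigned */
static void *walker_task(void *arg)
{
        (void)arg;
        for (int i = 0; i < 10000; i++) {
                pthread_mutex_lock(&hugetlb_lock_model);
                for (struct page_model *p = activelist_model; p; p = p->next)
                        assert(p->hugetlb_cgroup != NULL);
                pthread_mutex_unlock(&hugetlb_lock_model);
        }
        return NULL;
}

int main(void)
{
        static char cg;                 /* stand-in cgroup object */
        pthread_t t;

        pthread_create(&t, NULL, walker_task, NULL);
        for (int i = 0; i < 10000; i++)
                commit_and_activate(calloc(1, sizeof(struct page_model)), &cg);
        pthread_join(t, NULL);

        while (activelist_model) {      /* tear down the model list */
                struct page_model *p = activelist_model;
                activelist_model = p->next;
                free(p);
        }
        puts("every page on the active list had its cgroup assigned");
        return 0;
}

Compile with cc -pthread; the assert never fires because both updates
are atomic with respect to the walker. Splitting them across two lock
sections would reintroduce the window this patch closes.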
 
@@ -2431,6 +2429,25 @@ again:
        tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+                         struct vm_area_struct *vma, unsigned long start,
+                         unsigned long end, struct page *ref_page)
+{
+       __unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+       /*
+        * Clear this flag so that x86's huge_pmd_share page_table_shareable
+        * test will fail on a vma being torn down, and not grab a page table
+        * on its way out.  We're lucky that the flag has such an appropriate
+        * name, and can in fact be safely cleared here. We could clear it
+        * before the __unmap_hugepage_range above, but all that's necessary
+        * is to clear it before releasing the i_mmap_mutex. This works
+        * because in the context this is called, the VMA is about to be
+        * destroyed and the i_mmap_mutex is held.
+        */
+       vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
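
The new helper serves the vma teardown path, where the mapping is about
to be destroyed and the caller holds i_mmap_mutex: as the comment block
explains, clearing VM_MAYSHARE before the mutex is released makes x86's
page_table_shareable() test fail, so a concurrent huge_pmd_share()
cannot grab a page table belonging to a vma on its way out. A minimal
userspace model of this "close the door before unlocking" pattern,
assuming only POSIX threads; the *_model names are illustrative, not
kernel API, and the model takes the lock inside the teardown function
for brevity, whereas in the kernel the caller holds it across the call:

#include <pthread.h>
#include <stdio.h>

#define VM_MAYSHARE_MODEL 0x1UL         /* stands in for VM_MAYSHARE */

static pthread_mutex_t i_mmap_mutex_model = PTHREAD_MUTEX_INITIALIZER;
static unsigned long vm_flags_model = VM_MAYSHARE_MODEL;
static int page_table_sharers;          /* tasks holding the page table */

/* models huge_pmd_share(): only grab the page table while the vma
 * still advertises that sharing is allowed */
static void *sharer_task(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&i_mmap_mutex_model);
        if (vm_flags_model & VM_MAYSHARE_MODEL)
                page_table_sharers++;   /* would reuse the page table */
        pthread_mutex_unlock(&i_mmap_mutex_model);
        return NULL;
}

/* models __unmap_hugepage_range_final(): do the unmap work, then clear
 * the flag before the mutex is released, so no late sharer can attach
 * to the page tables of a vma that is going away */
static void final_teardown(void)
{
        pthread_mutex_lock(&i_mmap_mutex_model);
        /* ... __unmap_hugepage_range() work would happen here ... */
        vm_flags_model &= ~VM_MAYSHARE_MODEL;
        pthread_mutex_unlock(&i_mmap_mutex_model);
}

int main(void)
{
        pthread_t t;

        final_teardown();               /* the vma is being torn down */
        pthread_create(&t, NULL, sharer_task, NULL);
        pthread_join(t, NULL);

        /* a sharer arriving after teardown finds the flag clear */
        printf("late sharers attached: %d\n", page_table_sharers);
        return 0;
}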
@@ -3014,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+       /*
+        * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+        * may have cleared our pud entry and done put_page on the page table:
+        * once we release i_mmap_mutex, another task can do the final put_page
+        * and that page table be reused and filled with junk.
+        */
        flush_tlb_range(vma, start, end);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
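
The reordering keeps flush_tlb_range() inside the i_mmap_mutex critical
section for the reason the new comment gives: huge_pmd_unshare() may
already have cleared the pud entry and dropped a reference on the page
table, and once the mutex is released another task can do the final
put_page(), after which the page table may be reused and refilled while
stale TLB entries still point into it. A minimal userspace model of the
ordering rule, assuming only POSIX threads; the mutex stands in for
i_mmap_mutex, the cached pointer for a stale TLB entry, and
free()/malloc() for the page table being recycled:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t i_mmap_mutex_model = PTHREAD_MUTEX_INITIALIZER;
static int *page_table_model;           /* the shared page table page */
static int *stale_tlb_entry;            /* a cached translation into it */

/* models hugetlb_change_protection() after this fix: the "TLB" is
 * invalidated while the lock still pins the page table */
static void change_protection_model(void)
{
        pthread_mutex_lock(&i_mmap_mutex_model);
        stale_tlb_entry = NULL;         /* flush_tlb_range() */
        pthread_mutex_unlock(&i_mmap_mutex_model);
}

/* models another task doing the final put_page(): the page is freed
 * and immediately recycled for unrelated data */
static void *reuse_task(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&i_mmap_mutex_model);
        free(page_table_model);
        page_table_model = malloc(sizeof(int)); /* reused: now junk */
        pthread_mutex_unlock(&i_mmap_mutex_model);
        return NULL;
}

int main(void)
{
        pthread_t t;

        page_table_model = malloc(sizeof(int));
        stale_tlb_entry = page_table_model; /* a live TLB entry */

        change_protection_model();      /* flush, THEN unlock */

        pthread_create(&t, NULL, reuse_task, NULL);
        pthread_join(t, NULL);

        /* flushing before the unlock guarantees nothing still points
         * into the freed table; flushing after it would leave
         * stale_tlb_entry dangling across the reuse */
        printf("stale entry after teardown: %p\n", (void *)stale_tlb_entry);
        free(page_table_model);
        return 0;
}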