mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index adc3954..c0007f9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -512,7 +512,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        do {
                next = pmd_addr_end(addr, end);
                split_huge_page_pmd(vma->vm_mm, pmd);
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
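
This hunk swaps in the THP-aware pmd check. Even after the
split_huge_page_pmd() above, a racing transparent-hugepage operation can
leave the pmd none or trans-huge again, and the old
pmd_none_or_clear_bad() would misread a trans-huge pmd as corrupt,
clearing a live entry and warning. The replacement helper snapshots the
pmd once and treats "none" and "trans huge" as "skip this range". A
minimal sketch of its shape, simplified from the generic version in
include/asm-generic/pgtable.h (the real one uses pmd_read_atomic() and a
barrier to make the snapshot safe):

    static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
    {
            pmd_t pmdval = *pmd;    /* snapshot; it may change under us */

            /* none or trans-huge is not an error here: nothing to walk */
            if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
                    return 1;
            if (unlikely(pmd_bad(pmdval))) {
                    pmd_clear_bad(pmd);
                    return 1;
            }
            return 0;
    }
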
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
+       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        if (!vma || vma->vm_start > start)
                return -EFAULT;
 
+       if (start > vma->vm_start)
+               prev = vma;
+
        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
+               if (mpol_equal(vma_policy(vma), new_pol))
+                       continue;
+
+               pgoff = vma->vm_pgoff +
+                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 vma->anon_vma, vma->vm_file, pgoff,
                                  new_pol);
                if (prev) {
                        vma = prev;
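
Three fixes land in mbind_range() here. When start falls inside the
first VMA, prev is primed to that VMA so vma_merge() cannot merge the
new policy over the unchanged head of the mapping; VMAs that already
carry an equal policy are skipped outright; and vma_merge() is now given
the page offset that corresponds to vmstart rather than the VMA's own
vm_pgoff. The last one is the subtle bug: with hypothetical numbers and
4 KiB pages, vma->vm_start = 0x100000, vma->vm_pgoff = 0x10 and
vmstart = 0x103000 yield pgoff = 0x10 + ((0x103000 - 0x100000) >> 12) =
0x13, whereas the old code passed 0x10, a file offset three pages too
low for the [vmstart, vmend) piece being merged, which can defeat the
merge or leave a file mapping describing the wrong offsets.
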
@@ -933,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest,
-                                                               false, true);
+                                                       false, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }
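
migrate_pages() no longer takes a boolean sync flag as its final
argument; it takes an enum migrate_mode. A literal true would now be
silently interpreted as the middle mode (value 1) rather than a fully
synchronous migration, hence the explicit MIGRATE_SYNC. The enum, as
introduced upstream for sync-light compaction migration:

    enum migrate_mode {
            MIGRATE_ASYNC,          /* never block */
            MIGRATE_SYNC_LIGHT,     /* may block, but avoid ->writepage stalls */
            MIGRATE_SYNC,           /* fully synchronous, may block on everything */
    };
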
@@ -1834,18 +1843,24 @@ struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                unsigned long addr, int node)
 {
-       struct mempolicy *pol = get_vma_policy(current, vma, addr);
+       struct mempolicy *pol;
        struct zonelist *zl;
        struct page *page;
+       unsigned int cpuset_mems_cookie;
+
+retry_cpuset:
+       pol = get_vma_policy(current, vma, addr);
+       cpuset_mems_cookie = get_mems_allowed();
 
-       get_mems_allowed();
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
-               put_mems_allowed();
+               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+                       goto retry_cpuset;
+
                return page;
        }
        zl = policy_zonelist(gfp, pol, node);
@@ -1856,7 +1871,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                struct page *page =  __alloc_pages_nodemask(gfp, order,
                                                zl, policy_nodemask(gfp, pol));
                __mpol_put(pol);
-               put_mems_allowed();
+               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+                       goto retry_cpuset;
                return page;
        }
        /*
@@ -1864,7 +1880,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
         */
        page = __alloc_pages_nodemask(gfp, order, zl,
                                      policy_nodemask(gfp, pol));
-       put_mems_allowed();
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
        return page;
 }
 
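
All three put_mems_allowed() call sites in alloc_pages_vma() now follow
the same pattern: get_mems_allowed() returns a seqcount cookie instead
of pinning the nodemask, and put_mems_allowed(cookie) reports whether
the task's cpuset mems_allowed stayed stable while the allocation ran.
Only a failed allocation that raced with a nodemask update is retried; a
page that did come back is returned regardless, since it was valid
against one of the two nodemasks. Note that the retry_cpuset label sits
before the get_vma_policy() call because the failed pass may already
have dropped its policy reference via mpol_cond_put()/__mpol_put(). The
cpuset side of the interface looks roughly like this (a sketch of the
seqcount-based helpers this series adds to include/linux/cpuset.h, with
mems_allowed_seq the per-task seqcount):

    static inline unsigned int get_mems_allowed(void)
    {
            /* open a read-side section over current's nodemask */
            return read_seqcount_begin(&current->mems_allowed_seq);
    }

    /* returns true if mems_allowed was stable over the section */
    static inline bool put_mems_allowed(unsigned int seq)
    {
            return !read_seqcount_retry(&current->mems_allowed_seq, seq);
    }
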
@@ -1891,11 +1908,14 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
        struct mempolicy *pol = current->mempolicy;
        struct page *page;
+       unsigned int cpuset_mems_cookie;
 
        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
                pol = &default_policy;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
+
        /*
         * No reference counting needed for current->mempolicy
         * nor system default_policy
@@ -1906,7 +1926,10 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                page = __alloc_pages_nodemask(gfp, order,
                                policy_zonelist(gfp, pol, numa_node_id()),
                                policy_nodemask(gfp, pol));
-       put_mems_allowed();
+
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
+
        return page;
 }
 EXPORT_SYMBOL(alloc_pages_current);
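
alloc_pages_current() gets the same cookie-and-retry treatment. The
writer this detects is the cpuset code rebinding a task's nodemask,
which under this scheme bumps the per-task seqcount around the rewrite,
along these lines (a simplified sketch of the update path in the
upstream "cpuset: mm: reduce large amounts of memory barrier related
damage" change; the real cpuset_change_task_nodemask() also rebinds the
task's mempolicy between the begin/end):

    task_lock(tsk);
    write_seqcount_begin(&tsk->mems_allowed_seq);
    tsk->mems_allowed = *newmems;
    write_seqcount_end(&tsk->mems_allowed_seq);
    task_unlock(tsk);

The point of the rework is that allocators no longer pay for the heavier
barrier-and-counter dance the old get_mems_allowed()/put_mems_allowed()
pair imposed on every allocation.
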