Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
diff --git a/mm/mremap.c b/mm/mremap.c
index 506fa44..d6959cb 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -41,8 +41,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
                return NULL;
 
        pmd = pmd_offset(pud, addr);
-       split_huge_page_pmd(mm, pmd);
-       if (pmd_none_or_clear_bad(pmd))
+       if (pmd_none(*pmd))
                return NULL;
 
        return pmd;
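
Transparent huge pages are now handled (or split) inside move_page_tables() itself (see the large hunk further down), so get_old_pmd() must not split unconditionally, and it can no longer use pmd_none_or_clear_bad(): that helper would treat a huge pmd as a corrupt page-table pointer and clear it. After this hunk the helper reads roughly as follows (sketch, not part of the diff; the pgd/pud walk above the visible context is assumed):

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))             /* a trans-huge pmd may legitimately be here now */
                return NULL;

        return pmd;
}
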
@@ -65,8 +64,6 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                return NULL;
 
        VM_BUG_ON(pmd_trans_huge(*pmd));
-       if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
-               return NULL;
 
        return pmd;
 }
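
Dropping __pte_alloc() here defers allocation of the destination pte page table until move_page_tables() knows whether the source pmd is a huge pmd; moving a huge pmd in one piece requires the destination pmd to still be empty. The allocation reappears below as the pmd_none(*new_pmd) && __pte_alloc(...) check. For reference, alloc_new_pmd() after this hunk reduces to roughly the following (sketch, not part of the diff; the pgd/pud allocation above the visible context is assumed):

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));

        return pmd;
}
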
@@ -80,11 +77,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
-       unsigned long old_start;
 
-       old_start = old_addr;
-       mmu_notifier_invalidate_range_start(vma->vm_mm,
-                                           old_start, old_end);
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
@@ -111,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
-               pte = ptep_clear_flush(vma, old_addr, old_pte);
+               pte = ptep_get_and_clear(mm, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                set_pte_at(mm, new_addr, new_pte, pte);
        }
@@ -123,7 +116,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
-       mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
 #define LATENCY_LIMIT  (64 * PAGE_SIZE)
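
Taken together, the move_ptes() hunks above stop flushing the TLB once per PTE (ptep_clear_flush() becomes ptep_get_and_clear()) and stop bracketing every call with mmu notifier invalidates; both are deferred to move_page_tables() below, which issues a single flush_tlb_range() and a single invalidate pair for the whole move. A toy userspace model of that bookkeeping, purely to illustrate the batching (all names here are made up, nothing is kernel API):

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

/* Hypothetical stand-in for flush_tlb_range(): just counts ranged flushes. */
static unsigned long nr_range_flushes;

static void toy_flush_range(unsigned long start, unsigned long end)
{
        nr_range_flushes++;
        printf("flush [%#lx, %#lx)\n", start, end);
}

int main(void)
{
        unsigned long start = 0x100000UL, len = 16 * TOY_PAGE_SIZE;
        unsigned long per_page_flushes = 0;
        bool need_flush = false;

        for (unsigned long addr = start; addr < start + len; addr += TOY_PAGE_SIZE) {
                per_page_flushes++;     /* old scheme: one flush per PTE moved */
                need_flush = true;      /* new scheme: only remember that we moved something */
        }

        if (need_flush)
                toy_flush_range(start, start + len);    /* one ranged flush at the end */

        printf("old scheme: %lu flushes, new scheme: %lu\n",
               per_page_flushes, nr_range_flushes);
        return 0;
}
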
@@ -134,22 +126,43 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
+       bool need_flush = false;
 
        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);
 
+       mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);
+
        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
-               if (next - 1 > old_end)
-                       next = old_end;
+               /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
+               if (extent > old_end - old_addr)
+                       extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
+               if (pmd_trans_huge(*old_pmd)) {
+                       int err = 0;
+                       if (extent == HPAGE_PMD_SIZE)
+                               err = move_huge_pmd(vma, new_vma, old_addr,
+                                                   new_addr, old_end,
+                                                   old_pmd, new_pmd);
+                       if (err > 0) {
+                               need_flush = true;
+                               continue;
+                       } else if (!err) {
+                               split_huge_page_pmd(vma->vm_mm, old_pmd);
+                       }
+                       VM_BUG_ON(pmd_trans_huge(*old_pmd));
+               }
+               if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
+                                                     new_pmd, new_addr))
+                       break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
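
Two things happen in this hunk. First, the extent calculation is rewritten so that next is allowed to wrap past the end of the address space: the unsigned subtraction next - old_addr still gives the distance to the PMD boundary, and the new clamp against old_end - old_addr bounds it to the remaining request (this is what the "even if next overflowed" comment refers to). Second, a huge source pmd is handled before the PTE path: err > 0 means move_huge_pmd() moved the whole huge pmd, so only the deferred TLB flush needs to be remembered; err == 0 means it could not be moved as a unit and is split here; any other outcome must leave the pmd non-huge by the time execution falls through, which is what the VM_BUG_ON asserts. A stand-alone illustration of the overflow arithmetic (plain userspace C, not kernel code; the 32-bit values are picked only to force the wrap):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t PMD_SIZE = 0x200000;                  /* 2 MiB */
        const uint32_t PMD_MASK = ~(PMD_SIZE - 1);
        uint32_t old_addr = 0xffe00000;                      /* last PMD of a 32-bit space */
        uint32_t old_end  = 0xfff00000;

        uint32_t next = (old_addr + PMD_SIZE) & PMD_MASK;    /* wraps to 0 */
        uint32_t extent = next - old_addr;                   /* 0x200000, not garbage */
        if (extent > old_end - old_addr)
                extent = old_end - old_addr;                 /* clamped to 0x100000 */

        printf("next=%#x extent=%#x\n", next, extent);
        return 0;
}
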
@@ -157,7 +170,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                                new_vma, new_pmd, new_addr);
+               need_flush = true;
        }
+       if (likely(need_flush))
+               flush_tlb_range(vma, old_end-len, old_addr);
+
+       mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);
 
        return len + old_addr - old_end;        /* how much done */
 }
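
The return value len + old_addr - old_end is the number of bytes actually processed: len on success, less if alloc_new_pmd() or the new __pte_alloc() bails out part-way. Note also that the flush covers [old_end - len, old_addr), i.e. only the range the loop got through, while the notifier range_end matches the full range registered at range_start. The caller, move_vma(), relies on the byte count to undo a partial move; roughly paraphrased from the same file (not part of this diff, details vary between trees):

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
                 * On error, move the entries that were copied back to the old
                 * location (this succeeds, the page tables are still there)
                 * and unmap the new area instead of the old one.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }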