diff --git a/mm/memory.c b/mm/memory.c
index 7b1e4fe..fb135ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -712,7 +712,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        add_taint(TAINT_BAD_PAGE);
 }
 
-static inline int is_cow_mapping(vm_flags_t flags)
+static inline bool is_cow_mapping(vm_flags_t flags)
 {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
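
As an aside, the predicate itself is unchanged by the int-to-bool switch: a mapping is copy-on-write when it is private (VM_SHARED clear) yet may still become writable (VM_MAYWRITE set). A minimal standalone sketch of that check, with the flag values copied here purely for illustration (the real definitions live in include/linux/mm.h):

#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED    0x00000008UL   /* illustrative copy of the kernel value */
#define VM_MAYWRITE  0x00000020UL   /* illustrative copy of the kernel value */

typedef unsigned long vm_flags_t;

static inline bool is_cow_mapping(vm_flags_t flags)
{
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
        /* private, possibly-writable mapping: COW */
        printf("private+maywrite: %d\n", is_cow_mapping(VM_MAYWRITE));
        /* shared writable mapping: not COW */
        printf("shared+maywrite:  %d\n", is_cow_mapping(VM_SHARED | VM_MAYWRITE));
        return 0;
}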
@@ -1039,6 +1039,9 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+       bool is_cow;
        int ret;
 
        /*
@@ -1072,8 +1075,12 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * parent mm. And a permission downgrade will only happen if
         * is_cow_mapping() returns true.
         */
-       if (is_cow_mapping(vma->vm_flags))
-               mmu_notifier_invalidate_range_start(src_mm, addr, end);
+       is_cow = is_cow_mapping(vma->vm_flags);
+       mmun_start = addr;
+       mmun_end   = end;
+       if (is_cow)
+               mmu_notifier_invalidate_range_start(src_mm, mmun_start,
+                                                   mmun_end);
 
        ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
@@ -1089,9 +1096,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
 
-       if (is_cow_mapping(vma->vm_flags))
-               mmu_notifier_invalidate_range_end(src_mm,
-                                                 vma->vm_start, end);
+       if (is_cow)
+               mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
        return ret;
 }
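
A condensed view of the pattern this hunk introduces (kernel-style sketch, not meant to build on its own): the COW test and the notifier range are evaluated once up front, so the start and end calls are guaranteed to be symmetric even though the copy loop advances addr.

        is_cow = is_cow_mapping(vma->vm_flags);
        mmun_start = addr;              /* snapshot the range before the loop */
        mmun_end   = end;
        if (is_cow)
                mmu_notifier_invalidate_range_start(src_mm, mmun_start, mmun_end);

        /* ... copy the page tables, advancing addr up to end ... */

        if (is_cow)                     /* same predicate, same cached range */
                mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);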
 
@@ -1522,7 +1528,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                                spin_unlock(&mm->page_table_lock);
                                wait_split_huge_page(vma->anon_vma, pmd);
                        } else {
-                               page = follow_trans_huge_pmd(mm, address,
+                               page = follow_trans_huge_pmd(vma, address,
                                                             pmd, flags);
                                spin_unlock(&mm->page_table_lock);
                                goto out;
@@ -1577,12 +1583,12 @@ split_fallthrough:
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();  /* push cached pages to LRU */
                        /*
-                        * Because we lock page here and migration is
-                        * blocked by the pte's page reference, we need
-                        * only check for file-cache page truncation.
+                        * Because we lock page here, and migration is
+                        * blocked by the pte's page reference, and we
+                        * know the page is still mapped, we don't even
+                        * need to check for file-cache page truncation.
                         */
-                       if (page->mapping)
-                               mlock_vma_page(page);
+                       mlock_vma_page(page);
                        unlock_page(page);
                }
        }
@@ -2142,7 +2148,7 @@ out:
  * @addr: target user address of this page
  * @pfn: source kernel pfn
  *
- * Similar to vm_inert_page, this allows drivers to insert individual pages
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
  * they've allocated into a user vma. Same comments apply.
  *
  * This function should only be called from a vm_ops->fault handler, and
@@ -2297,14 +2303,13 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
-        *   VM_RESERVED is specified all over the place, because
-        *      in 2.4 it kept swapout's vma scan off this vma; but
-        *      in 2.6 the LRU scan won't even find its pages, so this
-        *      flag means no more than count its pages in reserved_vm,
-        *      and omit it from core dump, even when VM_IO turned off.
         *   VM_PFNMAP tells the core MM that the base pages are just
         *      raw PFN mappings, and do not have a "struct page" associated
         *      with them.
+        *   VM_DONTEXPAND
+        *      Disable vma merging and expanding with mremap().
+        *   VM_DONTDUMP
+        *      Omit vma from core dump, even when VM_IO turned off.
         *
         * There's a horrible special case to handle copy-on-write
         * behaviour that some programs depend on. We mark the "original"
@@ -2321,7 +2326,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        if (err)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
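
For callers nothing changes except that the VM_RESERVED bit is gone: remap_pfn_range() now marks the vma VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP itself. A hypothetical driver ->mmap handler (the names and the device address below are made up for illustration, not taken from this patch) still looks like:

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long phys = 0xfe000000UL;      /* made-up MMIO base */

        /* remap_pfn_range() sets the VM_* bits shown in the hunk above */
        return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}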
@@ -2517,11 +2522,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                spinlock_t *ptl, pte_t orig_pte)
        __releases(ptl)
 {
-       struct page *old_page, *new_page;
+       struct page *old_page, *new_page = NULL;
        pte_t entry;
        int ret = 0;
        int page_mkwrite = 0;
        struct page *dirty_page = NULL;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+       bool mmun_called = false;       /* For mmu_notifiers */
 
        old_page = vm_normal_page(vma, address, orig_pte);
        if (!old_page) {
@@ -2699,6 +2707,11 @@ gotten:
        if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
+       mmun_start  = address & PAGE_MASK;
+       mmun_end    = (address & PAGE_MASK) + PAGE_SIZE;
+       mmun_called = true;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
        /*
         * Re-check the pte - we dropped the lock
         */
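
Together with the _end call added at the unlock: label in the next hunk, the shape of the do_wp_page() change is (illustrative condensation, not a separate patch): the invalidation covers exactly the one page being replaced, and mmun_called keeps the paths that never allocate a new page (and thus never ran _start) from issuing a stray _end.

        mmun_start  = address & PAGE_MASK;      /* start of the faulting page */
        mmun_end    = mmun_start + PAGE_SIZE;   /* exclusive end, one page */
        mmun_called = true;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        /* ... re-check the pte and install the new page ... */
        pte_unmap_unlock(page_table, ptl);
        if (mmun_called)                        /* only if _start actually ran */
                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);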
@@ -2765,6 +2778,8 @@ gotten:
                page_cache_release(new_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
+       if (mmun_called)
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        if (old_page) {
                /*
                 * Don't let another task, with possibly unlocked vma,
@@ -2802,14 +2817,13 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
        zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
-static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
+static inline void unmap_mapping_range_tree(struct rb_root *root,
                                            struct zap_details *details)
 {
        struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
        pgoff_t vba, vea, zba, zea;
 
-       vma_prio_tree_foreach(vma, &iter, root,
+       vma_interval_tree_foreach(vma, root,
                        details->first_index, details->last_index) {
 
                vba = vma->vm_pgoff;
@@ -2840,7 +2854,7 @@ static inline void unmap_mapping_range_list(struct list_head *head,
         * across *all* the pages in each nonlinear VMA, not just the pages
         * whose virtual address lies outside the file truncation point.
         */
-       list_for_each_entry(vma, head, shared.vm_set.list) {
+       list_for_each_entry(vma, head, shared.nonlinear) {
                details->nonlinear_vma = vma;
                unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
        }
@@ -2884,7 +2898,7 @@ void unmap_mapping_range(struct address_space *mapping,
 
 
        mutex_lock(&mapping->i_mmap_mutex);
-       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
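
The unmap_mapping_range() hunks are the memory.c side of the prio_tree to interval-tree conversion: i_mmap is now an rbtree-based interval tree keyed by file page offsets, so emptiness is tested with RB_EMPTY_ROOT() and overlapping vmas are walked with vma_interval_tree_foreach(). A kernel-style sketch of the lookup as the truncation path now performs it (it mirrors the hunks above rather than adding anything new):

        if (!RB_EMPTY_ROOT(&mapping->i_mmap)) {
                struct vm_area_struct *vma;

                /* visits only vmas whose pgoff range overlaps the zap range */
                vma_interval_tree_foreach(vma, &mapping->i_mmap,
                                details.first_index, details.last_index) {
                        /* clamp to the overlap, then zap that part of the vma */
                }
        }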