diff --git a/mm/mmap.c b/mm/mmap.c
index a23e30f..2664a47 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -144,7 +144,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 */
                free -= global_page_state(NR_SHMEM);
 
-               free += nr_swap_pages;
+               free += get_nr_swap_pages();
 
                /*
                 * Any slabs which are created with the
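The open-coded read of nr_swap_pages becomes an accessor call because the counter is an atomic_long_t once per-swap-partition locking is in place. A minimal sketch of the helper, assuming the include/linux/swap.h definition from mainline kernels of this era:

    extern atomic_long_t nr_swap_pages;

    /* sketch: the total is read without holding any swap lock */
    static inline long get_nr_swap_pages(void)
    {
            return atomic_long_read(&nr_swap_pages);
    }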
@@ -203,7 +203,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+               atomic_inc(&file_inode(file)->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;
 
@@ -576,7 +576,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
+                       atomic_dec(&file_inode(file)->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;
 
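This hunk and the one above replace the pointer chase file->f_path.dentry->d_inode with the file_inode() helper, keeping call sites independent of how a struct file reaches its inode. A sketch of the accessor, assuming the include/linux/fs.h variant that caches the inode in struct file (earlier versions simply returned f->f_path.dentry->d_inode):

    /* sketch of include/linux/fs.h, not part of this patch */
    static inline struct inode *file_inode(const struct file *f)
    {
            return f->f_inode;      /* cached when the file is opened */
    }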
@@ -809,7 +809,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                anon_vma_interval_tree_post_update_vma(vma);
                if (adjust_next)
                        anon_vma_interval_tree_post_update_vma(next);
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        }
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
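anon_vma_unlock() is renamed to anon_vma_unlock_write() to match the read-side variants that appeared when the anon_vma mutex was converted to an rwsem; call sites behave exactly as before. The write-side pair looks roughly like this (a sketch of the include/linux/rmap.h inlines, assuming the rwsem-based anon_vma of this kernel):

    static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
    {
            down_write(&anon_vma->root->rwsem);
    }

    static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
    {
            up_write(&anon_vma->root->rwsem);
    }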
@@ -1163,13 +1163,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff,
-                       bool *populate)
+                       unsigned long *populate)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
 
-       *populate = false;
+       *populate = 0;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1229,7 +1229,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        return -EAGAIN;
        }
 
-       inode = file ? file->f_path.dentry->d_inode : NULL;
+       inode = file ? file_inode(file) : NULL;
 
        if (file) {
                switch (flags & MAP_TYPE) {
@@ -1291,11 +1291,23 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       addr = mmap_region(file, addr, len, flags, vm_flags, pgoff);
-       if (!IS_ERR_VALUE(addr) &&
-           ((vm_flags & VM_LOCKED) ||
-            (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
-               *populate = true;
+       /*
+        * Set 'VM_NORESERVE' if we should not account for the
+        * memory use of this mapping.
+        */
+       if (flags & MAP_NORESERVE) {
+               /* We honor MAP_NORESERVE if allowed to overcommit */
+               if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+                       vm_flags |= VM_NORESERVE;
+
+               /* hugetlb applies strict overcommit unless MAP_NORESERVE */
+               if (file && is_file_hugepages(file))
+                       vm_flags |= VM_NORESERVE;
+       }
+
+       addr = mmap_region(file, addr, len, vm_flags, pgoff);
+       if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+               *populate = len;
        return addr;
 }
 
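*populate changes from bool to unsigned long because the caller now needs the mapping length, not just a yes/no answer: the pages are faulted in only after mmap_sem has been released. A caller-side sketch, modeled on mm/util.c:vm_mmap_pgoff() with this change applied (abridged; the security hook and error paths are trimmed):

    unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
                                unsigned long len, unsigned long prot,
                                unsigned long flag, unsigned long pgoff)
    {
            struct mm_struct *mm = current->mm;
            unsigned long populate;
            unsigned long ret;

            down_write(&mm->mmap_sem);
            ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, &populate);
            up_write(&mm->mmap_sem);
            /* populate only after dropping the write lock */
            if (populate)
                    mm_populate(ret, populate);
            return ret;
    }

The old open-coded test on MAP_POPULATE/MAP_NONBLOCK is replaced by the single VM_POPULATE bit, which this series computes earlier, during mmap flag translation.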
@@ -1411,8 +1423,7 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
-                         unsigned long len, unsigned long flags,
-                         vm_flags_t vm_flags, unsigned long pgoff)
+               unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
@@ -1420,7 +1431,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
-       struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
+       struct inode *inode =  file ? file_inode(file) : NULL;
 
        /* Clear old maps */
        error = -ENOMEM;
@@ -1435,20 +1446,6 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       /*
-        * Set 'VM_NORESERVE' if we should not account for the
-        * memory use of this mapping.
-        */
-       if ((flags & MAP_NORESERVE)) {
-               /* We honor MAP_NORESERVE if allowed to overcommit */
-               if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-                       vm_flags |= VM_NORESERVE;
-
-               /* hugetlb applies strict overcommit unless MAP_NORESERVE */
-               if (file && is_file_hugepages(file))
-                       vm_flags |= VM_NORESERVE;
-       }
-
        /*
         * Private writable mapping: check memory availability
         */
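The block removed here is exactly the MAP_NORESERVE logic added to do_mmap_pgoff() above; with the accounting policy settled before mmap_region() runs, the raw flags argument can be dropped from its signature. For reference, a hypothetical userspace snippet showing what the flag buys (illustrative only, not from the patch):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* With vm.overcommit_memory != 2 (OVERCOMMIT_NEVER), the kernel
             * sets VM_NORESERVE and skips swap-space accounting, so a large
             * private mapping can succeed without reserved backing store. */
            void *p = mmap(NULL, 1UL << 30, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
            if (p == MAP_FAILED)
                    perror("mmap");
            return p == MAP_FAILED;
    }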
@@ -2188,9 +2185,28 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *next;
+
+       address &= PAGE_MASK;
+       next = vma->vm_next;
+       if (next && next->vm_start == address + PAGE_SIZE) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+       }
        return expand_upwards(vma, address);
 }
 
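The fault-side rule the new comment refers to lives in mm/memory.c:check_stack_guard_page(). A sketch of its GROWSDOWN half, under the assumption that it applies the same "only abut another stack mapping" test (treat this as a paraphrase, not an exact quote):

    static inline int check_stack_guard_page(struct vm_area_struct *vma,
                                             unsigned long address)
    {
            address &= PAGE_MASK;
            if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
                    struct vm_area_struct *prev = vma->vm_prev;

                    /* abutting below is only ok for a (split) stack vma */
                    if (prev && prev->vm_end == address)
                            return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

                    return expand_downwards(vma, address - PAGE_SIZE);
            }
            return 0;
    }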
@@ -2205,14 +2221,21 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!prev || expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(prev, addr, prev->vm_end);
-       }
+       if (prev->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
        return prev;
 }
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *prev;
+
+       address &= PAGE_MASK;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end == address) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+       }
        return expand_downwards(vma, address);
 }
 
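Both find_extend_vma() variants (the GROWSUP one in this hunk, the GROWSDOWN one below) now call __mlock_vma_pages_range() directly, since the mlock_vma_pages_range() wrapper is gone in this series. Its declaration, roughly as found in mm/internal.h:

    extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
                    unsigned long start, unsigned long end, int *nonblocking);

Passing NULL for nonblocking disallows the fault-retry path that would drop mmap_sem, so the lock the caller already holds stays held while the newly exposed stack pages are faulted in.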
@@ -2233,9 +2256,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(vma, addr, start);
-       }
+       if (vma->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(vma, addr, start, NULL);
        return vma;
 }
 #endif
@@ -3022,7 +3044,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->root->rb_root.rb_node))
                        BUG();
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        }
 }