mm: allow drivers to prevent new writable mappings
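
The i_mmap_writable field of struct address_space counts existing
VM_SHARED mappings of a file.  This change turns it into an atomic_t
that is private to mm-core, so that a negative value can mean "new
writable mappings are blocked": mmap_region() now takes a
writable-mapping reference via mapping_map_writable() before calling
->mmap(), failing with -EPERM while the count is negative, and drops
its temporary VM_DENYWRITE and VM_SHARED references again once
vma_link() has made the vma visible, with the error paths unwinding
them in reverse order through the new allow_write_and_free_vma label.
The VM_GROWSDOWN/VM_GROWSUP rejection moves from mmap_region() up
into do_mmap_pgoff(); the find_vma_prev() simplification and the
expand_upwards() stack-guard-gap handling are folded into the same
commit in this tree.

Only the mm/mmap.c side is shown below.  The counterpart helpers live
in include/linux/fs.h; for reference, they look like the following
sketch taken from the corresponding upstream change (treat the exact
definitions as assumed for this tree):

	/* Returns 0, or -EPERM if new writable mappings are blocked. */
	static inline int mapping_map_writable(struct address_space *mapping)
	{
		return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
			0 : -EPERM;
	}

	static inline void mapping_unmap_writable(struct address_space *mapping)
	{
		atomic_dec(&mapping->i_mmap_writable);
	}

	/* Returns 0, or -EBUSY if writable mappings still exist. */
	static inline int mapping_deny_writable(struct address_space *mapping)
	{
		return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
			0 : -EBUSY;
	}

	static inline void mapping_allow_writable(struct address_space *mapping)
	{
		atomic_inc(&mapping->i_mmap_writable);
	}
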
diff --git a/mm/mmap.c b/mm/mmap.c
index cbcf486..a34afb8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -194,7 +194,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_DENYWRITE)
                atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
-               mapping->i_mmap_writable--;
+               mapping_unmap_writable(mapping);
 
        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -410,7 +410,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                if (vma->vm_flags & VM_DENYWRITE)
                        atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
-                       mapping->i_mmap_writable++;
+                       atomic_inc(&mapping->i_mmap_writable);
 
                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -1049,6 +1049,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 
                        if (!file->f_op || !file->f_op->mmap)
                                return -ENODEV;
+                       if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+                               return -EINVAL;
                        break;
 
                default:
@@ -1057,6 +1059,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
+                       if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+                               return -EINVAL;
                        /*
                         * Ignore pgoff.
                         */
@@ -1201,11 +1205,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       int correct_wcount = 0;
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
-       struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
 
        /* Clear old maps */
        error = -ENOMEM;
@@ -1272,17 +1274,23 @@ munmap_back:
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        if (file) {
-               error = -EINVAL;
-               if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
-                       goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
                        error = deny_write_access(file);
                        if (error)
                                goto free_vma;
-                       correct_wcount = 1;
                }
-               vma->vm_file = file;
-               get_file(file);
+               if (vm_flags & VM_SHARED) {
+                       error = mapping_map_writable(file->f_mapping);
+                       if (error)
+                               goto allow_write_and_free_vma;
+               }
+
+               /* ->mmap() can change vma->vm_file, but must guarantee that
+                * vma_link() below can deny write-access if VM_DENYWRITE is set
+                * and map writably if VM_SHARED is set. This usually means the
+                * new file must not have been exposed to user-space, yet.
+                */
+               vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
@@ -1319,11 +1327,14 @@ munmap_back:
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
-       file = vma->vm_file;
-
        /* Once vma denies write, undo our temporary denial count */
-       if (correct_wcount)
-               atomic_inc(&inode->i_writecount);
+       if (file) {
+               if (vm_flags & VM_SHARED)
+                       mapping_unmap_writable(file->f_mapping);
+               if (vm_flags & VM_DENYWRITE)
+                       allow_write_access(file);
+       }
+       file = vma->vm_file;
 out:
        perf_event_mmap(vma);
 
@@ -1337,14 +1348,17 @@ out:
        return addr;
 
 unmap_and_free_vma:
-       if (correct_wcount)
-               atomic_inc(&inode->i_writecount);
        vma->vm_file = NULL;
        fput(file);
 
        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
        charged = 0;
+       if (vm_flags & VM_SHARED)
+               mapping_unmap_writable(file->f_mapping);
+allow_write_and_free_vma:
+       if (vm_flags & VM_DENYWRITE)
+               allow_write_access(file);
 free_vma:
        kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
@@ -1626,39 +1640,27 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
-       struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node *rb_node;
-       if (!mm)
-               goto out;
-
-       /* Guard against addr being lower than the first VMA */
-       vma = mm->mmap;
-
-       /* Go through the RB tree quickly. */
-       rb_node = mm->mm_rb.rb_node;
-
-       while (rb_node) {
-               struct vm_area_struct *vma_tmp;
-               vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+       struct vm_area_struct *vma;
 
-               if (addr < vma_tmp->vm_end) {
-                       rb_node = rb_node->rb_left;
-               } else {
-                       prev = vma_tmp;
-                       if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-                               break;
+       vma = find_vma(mm, addr);
+       if (vma) {
+               *pprev = vma->vm_prev;
+       } else {
+               struct rb_node *rb_node = mm->mm_rb.rb_node;
+               *pprev = NULL;
+               while (rb_node) {
+                       *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
                        rb_node = rb_node->rb_right;
                }
        }
-
-out:
-       *pprev = prev;
-       return prev ? prev->vm_next : vma;
+       return vma;
 }
 
 /*
@@ -1727,16 +1729,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
+       /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
-       address += PAGE_SIZE;
-       if (!address)
+       if (address >= TASK_SIZE)
                return -ENOMEM;
+       address += PAGE_SIZE;
 
        /* Enforce stack_guard_gap */
        gap_addr = address + stack_guard_gap;
-       if (gap_addr < address)
-               return -ENOMEM;
+
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+
        next = vma->vm_next;
        if (next && next->vm_start < gap_addr) {
                if (!(next->vm_flags & VM_GROWSUP))
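
For context, not part of the patch itself: a driver using this to
prevent new writable mappings would pair the helpers roughly the way
the memfd sealing code does upstream.  A minimal sketch, where
my_seal_write() is a hypothetical example function:

	#include <linux/fs.h>

	/*
	 * Refuse to seal while shared writable mappings of the file
	 * exist.  Once this succeeds, new shared mappings of a
	 * writably-opened file fail in mmap_region(), because
	 * mapping_map_writable() now returns -EPERM.
	 */
	static int my_seal_write(struct file *file)
	{
		int error;

		error = mapping_deny_writable(file->f_mapping);
		if (error)
			return error;	/* -EBUSY: a writable mapping exists */

		/* ... mark the backing object sealed ... */
		return 0;
	}

	/* Undo with mapping_allow_writable(file->f_mapping). */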