mm: allow drivers to prevent new writable mappings
author David Herrmann <dh.herrmann@gmail.com>
Fri, 8 Aug 2014 21:25:25 +0000 (14:25 -0700)
committer Grazvydas Ignotas <notasas@gmail.com>
Sat, 16 Sep 2017 21:29:32 +0000 (00:29 +0300)
This patch (of 6):

The i_mmap_writable field counts existing writable mappings of an
address_space.  To allow drivers to prevent new writable mappings, make
this counter signed and prevent new writable mappings if it is negative.
This is modelled after i_writecount and DENYWRITE.
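
Concretely, the signed counter encodes three states (summarized here for
illustration; the helpers named are the ones added below):

  /*
   * i_mmap_writable > 0:  that many VM_SHARED mappings exist;
   *                       mapping_deny_writable() fails with -EBUSY
   * i_mmap_writable == 0: no VM_SHARED mappings and no deniers
   * i_mmap_writable < 0:  writable mappings are denied;
   *                       mapping_map_writable() fails with -EPERM until
   *                       every denier calls mapping_allow_writable()
   */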

This will be required by the shmem-sealing infrastructure to prevent any
new writable mappings after the WRITE seal has been set.  If a writable
mapping already exists, setting the seal will fail with EBUSY.
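
A rough sketch of how such a user could apply the new helpers (the function
below is hypothetical; only the mapping_*() helpers are part of this patch):

  static int hypothetical_set_write_seal(struct file *file)
  {
          struct address_space *mapping = file->f_mapping;
          int error;

          /* refuse the seal while any writable mapping exists */
          error = mapping_deny_writable(mapping);
          if (error)
                  return error;   /* -EBUSY */

          /* ... record the WRITE seal on the backing object ... */

          return 0;
  }

If the seal cannot be established after all, mapping_allow_writable() drops
the denial again.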

Note that we rely on the fact that you can increase the counter without
using the helpers if, and only if, you already own a writable mapping.
This matches what we do for i_writecount.
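
This can be seen in the kernel/fork.c hunk below: dup_mmap() increments the
counter directly, because the parent already contributes a VM_SHARED mapping
to the count, so the counter is positive and no denial can be in effect:

  if (tmp->vm_flags & VM_SHARED)
          atomic_inc(&mapping->i_mmap_writable);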

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Ryan Lortie <desrt@desrt.ca>
Cc: Lennart Poettering <lennart@poettering.net>
Cc: Daniel Mack <zonque@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/inode.c
include/linux/fs.h
kernel/fork.c
mm/mmap.c
mm/swap_state.c

index d62cb51..6953cd5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -174,6 +174,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
+       atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->assoc_mapping = NULL;
        mapping->backing_dev_info = &default_backing_dev_info;
index 52c56f1..56d62fd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -651,7 +651,7 @@ struct address_space {
        struct inode            *host;          /* owner: inode, block_device */
        struct radix_tree_root  page_tree;      /* radix tree of all pages */
        spinlock_t              tree_lock;      /* and lock protecting it */
-       unsigned int            i_mmap_writable;/* count VM_SHARED mappings */
+       atomic_t                i_mmap_writable;/* count VM_SHARED mappings */
        struct prio_tree_root   i_mmap;         /* tree of private and shared mappings */
        struct list_head        i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
        struct mutex            i_mmap_mutex;   /* protect tree, count, list */
@@ -731,10 +731,35 @@ static inline int mapping_mapped(struct address_space *mapping)
  * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
  * marks vma as VM_SHARED if it is shared, and the file was opened for
  * writing i.e. vma may be mprotected writable even if now readonly.
+ *
+ * If i_mmap_writable is negative, no new writable mappings are allowed. You
+ * can only deny writable mappings, if none exists right now.
  */
 static inline int mapping_writably_mapped(struct address_space *mapping)
 {
-       return mapping->i_mmap_writable != 0;
+       return atomic_read(&mapping->i_mmap_writable) > 0;
+}
+
+static inline int mapping_map_writable(struct address_space *mapping)
+{
+       return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
+               0 : -EPERM;
+}
+
+static inline void mapping_unmap_writable(struct address_space *mapping)
+{
+       atomic_dec(&mapping->i_mmap_writable);
+}
+
+static inline int mapping_deny_writable(struct address_space *mapping)
+{
+       return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
+               0 : -EBUSY;
+}
+
+static inline void mapping_allow_writable(struct address_space *mapping)
+{
+       atomic_inc(&mapping->i_mmap_writable);
 }
 
 /*
index 29b4604..59ca16f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -383,7 +383,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                atomic_dec(&inode->i_writecount);
                        mutex_lock(&mapping->i_mmap_mutex);
                        if (tmp->vm_flags & VM_SHARED)
-                               mapping->i_mmap_writable++;
+                               atomic_inc(&mapping->i_mmap_writable);
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        vma_prio_tree_add(tmp, mpnt);
index f04b71c..a34afb8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -194,7 +194,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_DENYWRITE)
                atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
-               mapping->i_mmap_writable--;
+               mapping_unmap_writable(mapping);
 
        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -410,7 +410,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                if (vma->vm_flags & VM_DENYWRITE)
                        atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
-                       mapping->i_mmap_writable++;
+                       atomic_inc(&mapping->i_mmap_writable);
 
                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -1279,6 +1279,17 @@ munmap_back:
                        if (error)
                                goto free_vma;
                }
+               if (vm_flags & VM_SHARED) {
+                       error = mapping_map_writable(file->f_mapping);
+                       if (error)
+                               goto allow_write_and_free_vma;
+               }
+
+               /* ->mmap() can change vma->vm_file, but must guarantee that
+                * vma_link() below can deny write-access if VM_DENYWRITE is set
+                * and map writably if VM_SHARED is set. This usually means the
+                * new file must not have been exposed to user-space, yet.
+                */
                vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
@@ -1317,8 +1328,12 @@ munmap_back:
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
-       if (vm_flags & VM_DENYWRITE)
-               allow_write_access(file);
+       if (file) {
+               if (vm_flags & VM_SHARED)
+                       mapping_unmap_writable(file->f_mapping);
+               if (vm_flags & VM_DENYWRITE)
+                       allow_write_access(file);
+       }
        file = vma->vm_file;
 out:
        perf_event_mmap(vma);
@@ -1333,14 +1348,17 @@ out:
        return addr;
 
 unmap_and_free_vma:
-       if (vm_flags & VM_DENYWRITE)
-               allow_write_access(file);
        vma->vm_file = NULL;
        fput(file);
 
        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
        charged = 0;
+       if (vm_flags & VM_SHARED)
+               mapping_unmap_writable(file->f_mapping);
+allow_write_and_free_vma:
+       if (vm_flags & VM_DENYWRITE)
+               allow_write_access(file);
 free_vma:
        kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
index 7b3dadd..e4f4101 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,6 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
+       .i_mmap_writable = ATOMIC_INIT(0),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),