shmem: fix faulting into a hole while it's punched
diff --git a/mm/shmem.c b/mm/shmem.c
index d672250..042b4af 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -76,6 +76,16 @@ static struct vfsmount *shm_mnt;
 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
 #define SHORT_SYMLINK_LEN 128
 
+/*
+ * vmtruncate_range() communicates with shmem_fault via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
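+ * The struct below lives on the stack of the task in vmtruncate_range(),
+ * published and cleared again under inode->i_lock.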
+ */
+struct shmem_falloc {
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+};
+
 struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
@@ -379,7 +389,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
        int i, j;
 
@@ -389,7 +399,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
-       pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
+       pgoff_t index = 0;
+
+       pagevec_init(&pvec, 0);
+       /*
+        * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+        */
+       while (!mapping_unevictable(mapping)) {
+               /*
+                * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+                * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+                */
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                       PAGEVEC_SIZE, pvec.pages, indices);
+               if (!pvec.nr)
+                       break;
+               index = indices[pvec.nr - 1] + 1;
+               shmem_deswap_pagevec(&pvec);
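+               /*
+                * Move any Unevictable pages found back to the evictable
+                * LRUs: swap entries were shuffled out of the pagevec above.
+                */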
+               check_move_unevictable_pages(pvec.pages, pvec.nr);
+               pagevec_release(&pvec);
+               cond_resched();
+       }
 }
 
 /*
@@ -440,7 +479,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
@@ -470,7 +510,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        continue;
                }
                if (index == start && indices[0] > end) {
-                       shmem_pagevec_release(&pvec);
+                       shmem_deswap_pagevec(&pvec);
+                       pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
@@ -494,7 +535,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }
@@ -563,7 +605,7 @@ static void shmem_evict_inode(struct inode *inode)
                kfree(xattr->name);
                kfree(xattr);
        }
-       BUG_ON(inode->i_blocks);
+       WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        end_writeback(inode);
 }
@@ -766,24 +808,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
-       struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
-
-       spol = mpol_cond_copy(&mpol,
-                       mpol_shared_policy_lookup(&info->policy, index));
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = spol;
-       return swapin_readahead(swap, gfp, &pvma, 0);
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
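+       /*
+        * The pseudo-vma never leaves this stack frame: we assume
+        * swapin_readahead() takes no reference to vm_policy that could
+        * outlive the mpol_cond_put() below.
+        */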
+       page = swapin_readahead(swap, gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
        struct vm_area_struct pvma;
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
@@ -791,10 +837,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-       /*
-        * alloc_page_vma() will drop the shared policy reference
-        */
-       return alloc_page_vma(gfp, &pvma, 0);
+       page = alloc_page_vma(gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
@@ -1022,6 +1070,43 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched, and
+        * wait on i_mutex to be released if vmf->flags permits.
+        */
+       if (unlikely(inode->i_private)) {
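+               /* Unlocked peek at i_private: re-read under i_lock below */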
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (!shmem_falloc ||
+                   vmf->pgoff < shmem_falloc->start ||
+                   vmf->pgoff >= shmem_falloc->next)
+                       shmem_falloc = NULL;
+               spin_unlock(&inode->i_lock);
+               /*
+                * i_lock has protected us from taking a stale shmem_falloc
+                * seriously once the return from vmtruncate_range() has gone
+                * back up that stack.  i_lock does not serialize with i_mutex
+                * at all, but it does not matter if we sometimes wait
+                * unnecessarily, or sometimes miss out on waiting: we just
+                * need to make those cases rare.
+                */
+               if (shmem_falloc) {
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
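+                               /*
+                                * Returning VM_FAULT_RETRY obliges us to drop
+                                * mmap_sem first; the i_mutex lock/unlock pair
+                                * below simply waits for the hole-punching
+                                * task to release i_mutex before we retry.
+                                */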
+                               up_read(&vma->vm_mm->mmap_sem);
+                               mutex_lock(&inode->i_mutex);
+                               mutex_unlock(&inode->i_mutex);
+                               return VM_FAULT_RETRY;
+                       }
+                       /* cond_resched? Leave that to GUP or return to user */
+                       return VM_FAULT_NOPAGE;
+               }
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1033,6 +1118,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return ret;
 }
 
+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+       /*
+        * If the underlying filesystem is not going to provide
+        * a way to truncate a range of blocks (punch a hole),
+        * we should return failure right now.
+        * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
+        */
+       if (inode->i_op->truncate_range != shmem_truncate_range)
+               return -ENOSYS;
+
+       mutex_lock(&inode->i_mutex);
+       {
+               struct shmem_falloc shmem_falloc;
+               struct address_space *mapping = inode->i_mapping;
+               loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+               loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
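+               /*
+                * Only whole pages fall inside [unmap_start, unmap_end]:
+                * partial pages at either end are zeroed by
+                * shmem_truncate_range() rather than unmapped here.
+                */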
+
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
+
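+               /*
+                * From here until i_private is cleared below, concurrent
+                * faults on this range see shmem_falloc and back off, so
+                * racing faults cannot keep re-instantiating pages in the
+                * hole while it is being punched.
+                */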
+               if ((u64)unmap_end > (u64)unmap_start)
+                       unmap_mapping_range(mapping, unmap_start,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, lstart, lend);
+               /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               spin_unlock(&inode->i_lock);
+       }
+       mutex_unlock(&inode->i_mutex);
+       return 0;
+}
+
 #ifdef CONFIG_NUMA
 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
@@ -1068,13 +1191,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
-               /*
-                * Ensure that a racing putback_lru_page() can see
-                * the pages of this mapping are evictable when we
-                * skip them due to !PageLRU during the scan.
-                */
-               smp_mb__after_clear_bit();
-               scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;
 
@@ -1334,6 +1450,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
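+               /* nr_pages_max: allocated capacity of pages[] and partial[] */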
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1422,7 +1539,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1936,12 +2053,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *dentry = NULL;
-       u64 inum = fid->raw[2];
-       inum = (inum << 32) | fid->raw[1];
+       u64 inum;
 
        if (fh_len < 3)
                return NULL;
 
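+       /* fh_len checked above: only now safe to read raw[1] and raw[2] */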
+       inum = fid->raw[2];
+       inum = (inum << 32) | fid->raw[1];
+
        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
@@ -2087,6 +2206,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
        unsigned long inodes;
        int error = -EINVAL;
 
+       config.mpol = NULL;
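+       /* stays NULL unless shmem_parse_options() sees an mpol= option */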
        if (shmem_parse_options(data, &config, true))
                return error;
 
@@ -2111,8 +2231,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
        sbinfo->max_inodes  = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;
 
-       mpol_put(sbinfo->mpol);
-       sbinfo->mpol        = config.mpol;      /* transfers initial ref */
+       /*
+        * Preserve previous mempolicy unless mpol remount option was specified.
+        */
+       if (config.mpol) {
+               mpol_put(sbinfo->mpol);
+               sbinfo->mpol = config.mpol;     /* transfers initial ref */
+       }
 out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
@@ -2446,12 +2571,22 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+       /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
+       return -ENOSYS;
+}
+
 #define shmem_vm_ops                           generic_file_vm_ops
 #define shmem_file_operations                  ramfs_file_operations
 #define shmem_get_inode(sb, dir, mode, dev, flags)     ramfs_get_inode(sb, dir, mode, dev)