pandora-kernel.git: mm/shmem.c

diff --git a/mm/shmem.c b/mm/shmem.c
index 0f094a2..32f6763 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -73,6 +73,9 @@ static struct vfsmount *shm_mnt;
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
 
+/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+#define SHORT_SYMLINK_LEN 128
+
 struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
@@ -329,10 +332,14 @@ repeat:
                if (unlikely(!page))
                        continue;
                if (radix_tree_exception(page)) {
-                       if (radix_tree_exceptional_entry(page))
-                               goto export;
-                       /* radix_tree_deref_retry(page) */
-                       goto restart;
+                       if (radix_tree_deref_retry(page))
+                               goto restart;
+                       /*
+                        * Otherwise, we must be storing a swap entry
+                        * here as an exceptional entry: so return it
+                        * without attempting to raise page count.
+                        */
+                       goto export;
                }
                if (!page_cache_get_speculative(page))
                        goto repeat;
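
The reordered exception branch above now retries only on radix_tree_deref_retry(), and treats any other exceptional entry as a swap entry to be exported without taking a page reference. A minimal user-space sketch of the underlying idea, assuming demo_* names and tag values chosen for illustration only (the kernel's own encoding lives in swp_to_radix_entry()/radix_to_swp_entry()):

/*
 * Sketch: a swap entry shares a pointer-sized radix-tree slot with page
 * pointers by carrying a low tag bit that no real page pointer has set,
 * so lookups can recognise it without dereferencing or refcounting it.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_EXCEPTIONAL_TAG    0x2UL   /* assumed tag bit for the demo */
#define DEMO_EXCEPTIONAL_SHIFT  2       /* assumed shift for the demo */

static void *demo_swp_to_slot(unsigned long swp_val)
{
        return (void *)((swp_val << DEMO_EXCEPTIONAL_SHIFT) | DEMO_EXCEPTIONAL_TAG);
}

static int demo_slot_is_exceptional(const void *slot)
{
        return ((unsigned long)slot & DEMO_EXCEPTIONAL_TAG) != 0;
}

static unsigned long demo_slot_to_swp(const void *slot)
{
        return (unsigned long)slot >> DEMO_EXCEPTIONAL_SHIFT;
}

int main(void)
{
        void *slot = demo_swp_to_slot(12345);

        /* Tagged slot: export the swap value, never page_cache_get_speculative(). */
        assert(demo_slot_is_exceptional(slot));
        printf("decoded swap value: %lu\n", demo_slot_to_swp(slot));
        return 0;
}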
@@ -353,42 +360,6 @@ export:
        return ret;
 }
 
-/*
- * Lockless lookup of swap entry in radix tree, avoiding refcount on pages.
- */
-static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap)
-{
-       void  **slots[PAGEVEC_SIZE];
-       pgoff_t indices[PAGEVEC_SIZE];
-       unsigned int nr_found;
-
-restart:
-       nr_found = 1;
-       indices[0] = -1;
-       while (nr_found) {
-               pgoff_t index = indices[nr_found - 1] + 1;
-               unsigned int i;
-
-               rcu_read_lock();
-               nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                                       slots, indices, index, PAGEVEC_SIZE);
-               for (i = 0; i < nr_found; i++) {
-                       void *item = radix_tree_deref_slot(slots[i]);
-                       if (radix_tree_deref_retry(item)) {
-                               rcu_read_unlock();
-                               goto restart;
-                       }
-                       if (item == radswap) {
-                               rcu_read_unlock();
-                               return indices[i];
-                       }
-               }
-               rcu_read_unlock();
-               cond_resched();
-       }
-       return -1;
-}
-
 /*
  * Remove swap entry from radix tree, free the swap and its page cache.
  */
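
The open-coded shmem_find_swap() removed above is superseded by the generic radix_tree_locate_item() call in the shmem_unuse_inode() hunk further down: given the swap entry encoded as a radix-tree slot value, it returns the index of the slot holding that value, or -1 if it is absent. A minimal user-space sketch of that contract over a flat slot array (the demo_* name is hypothetical; the real helper walks the tree node by node):

#include <stdio.h>

/* Return the index of the slot equal to item, or -1 if no slot matches. */
static long demo_locate_item(void **slots, unsigned long nr_slots, void *item)
{
        unsigned long i;

        for (i = 0; i < nr_slots; i++)
                if (slots[i] == item)
                        return (long)i;
        return -1;
}

int main(void)
{
        int a, b, c;
        void *slots[] = { &a, NULL, &b, &c };

        printf("found at index %ld\n", demo_locate_item(slots, 4, &b));
        return 0;
}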
@@ -585,7 +556,8 @@ static void shmem_evict_inode(struct inode *inode)
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
-       }
+       } else
+               kfree(info->symlink);
 
        list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
                kfree(xattr->name);
@@ -608,7 +580,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
        int error;
 
        radswap = swp_to_radix_entry(swap);
-       index = shmem_find_swap(mapping, radswap);
+       index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return 0;
 
@@ -1173,7 +1145,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;
-static const struct inode_operations shmem_symlink_inline_operations;
+static const struct inode_operations shmem_short_symlink_operations;
 
 static int
 shmem_write_begin(struct file *file, struct address_space *mapping,
@@ -1638,10 +1610,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 
        info = SHMEM_I(inode);
        inode->i_size = len-1;
-       if (len <= SHMEM_SYMLINK_INLINE_LEN) {
-               /* do it inline */
-               memcpy(info->inline_symlink, symname, len);
-               inode->i_op = &shmem_symlink_inline_operations;
+       if (len <= SHORT_SYMLINK_LEN) {
+               info->symlink = kmemdup(symname, len, GFP_KERNEL);
+               if (!info->symlink) {
+                       iput(inode);
+                       return -ENOMEM;
+               }
+               inode->i_op = &shmem_short_symlink_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
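
With this hunk a symlink whose length (strlen plus the trailing NUL) fits in SHORT_SYMLINK_LEN is duplicated into a kmalloc'ed buffer via kmemdup(), and the matching kfree(info->symlink) was added to shmem_evict_inode() earlier; longer targets still go through shmem_getpage() onto a swappable page. A minimal user-space sketch of the short path, with demo_* helpers that are assumptions for illustration only (malloc/memcpy standing in for kmemdup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SHORT_SYMLINK_LEN 128

/* Duplicate a short target into a small heap buffer, like kmemdup(). */
static char *demo_store_short_symlink(const char *symname)
{
        size_t len = strlen(symname) + 1;       /* count the trailing NUL */
        char *copy;

        if (len > SHORT_SYMLINK_LEN)
                return NULL;                    /* long target: page-backed path */

        copy = malloc(len);                     /* kmemdup(symname, len, GFP_KERNEL) */
        if (copy)
                memcpy(copy, symname, len);
        return copy;
}

int main(void)
{
        char *link = demo_store_short_symlink("some/target");

        if (link) {
                printf("short symlink stored: %s\n", link);
                free(link);                     /* kfree(info->symlink) on evict */
        }
        return 0;
}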
@@ -1664,9 +1639,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        return 0;
 }
 
-static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
 {
-       nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
+       nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
        return NULL;
 }
 
@@ -1914,9 +1889,9 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 #endif /* CONFIG_TMPFS_XATTR */
 
-static const struct inode_operations shmem_symlink_inline_operations = {
+static const struct inode_operations shmem_short_symlink_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = shmem_follow_link_inline,
+       .follow_link    = shmem_follow_short_symlink,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
@@ -2259,10 +2234,8 @@ static void shmem_destroy_callback(struct rcu_head *head)
 
 static void shmem_destroy_inode(struct inode *inode)
 {
-       if ((inode->i_mode & S_IFMT) == S_IFREG) {
-               /* only struct inode is valid if it's an inline symlink */
+       if ((inode->i_mode & S_IFMT) == S_IFREG)
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
-       }
        call_rcu(&inode->i_rcu, shmem_destroy_callback);
 }