Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Feb 2011 16:00:35 +0000 (08:00 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Feb 2011 16:00:35 +0000 (08:00 -0800)
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: check return value of alloc_extent_map()
  Btrfs - Fix memory leak in btrfs_init_new_device()
  btrfs: prevent heap corruption in btrfs_ioctl_space_info()
  Btrfs: Fix balance panic
  Btrfs: don't release pages when we can't clear the uptodate bits
  Btrfs: fix page->private races

1  2 
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/volumes.c

diff --combined fs/btrfs/disk-io.c
@@@ -359,10 -359,14 +359,14 @@@ static int csum_dirty_buffer(struct btr
  
        tree = &BTRFS_I(page->mapping->host)->io_tree;
  
-       if (page->private == EXTENT_PAGE_PRIVATE)
+       if (page->private == EXTENT_PAGE_PRIVATE) {
+               WARN_ON(1);
                goto out;
-       if (!page->private)
+       }
+       if (!page->private) {
+               WARN_ON(1);
                goto out;
+       }
        len = page->private >> 2;
        WARN_ON(len == 0);
  
@@@ -2127,7 -2131,7 +2131,7 @@@ static void btrfs_end_buffer_write_sync
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
 -              if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
 +              if (printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
@@@ -2264,10 -2268,21 +2268,10 @@@ static int write_dev_supers(struct btrf
                        bh->b_end_io = btrfs_end_buffer_write_sync;
                }
  
 -              if (i == last_barrier && do_barriers && device->barriers) {
 -                      ret = submit_bh(WRITE_BARRIER, bh);
 -                      if (ret == -EOPNOTSUPP) {
 -                              printk("btrfs: disabling barriers on dev %s\n",
 -                                     device->name);
 -                              set_buffer_uptodate(bh);
 -                              device->barriers = 0;
 -                              /* one reference for submit_bh */
 -                              get_bh(bh);
 -                              lock_buffer(bh);
 -                              ret = submit_bh(WRITE_SYNC, bh);
 -                      }
 -              } else {
 +              if (i == last_barrier && do_barriers)
 +                      ret = submit_bh(WRITE_FLUSH_FUA, bh);
 +              else
                        ret = submit_bh(WRITE_SYNC, bh);
 -              }
  
                if (ret)
                        errors++;
diff --combined fs/btrfs/extent-tree.c
@@@ -1743,7 -1743,8 +1743,7 @@@ static int remove_extent_backref(struc
  static void btrfs_issue_discard(struct block_device *bdev,
                                u64 start, u64 len)
  {
 -      blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
 -                      BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 +      blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
  }
  
  static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
@@@ -6583,7 -6584,7 +6583,7 @@@ static noinline int relocate_data_exten
        u64 end = start + extent_key->offset - 1;
  
        em = alloc_extent_map(GFP_NOFS);
-       BUG_ON(!em || IS_ERR(em));
+       BUG_ON(!em);
  
        em->start = start;
        em->len = extent_key->offset;
diff --combined fs/btrfs/extent_io.c
@@@ -1946,6 -1946,7 +1946,7 @@@ void set_page_extent_mapped(struct pag
  
  static void set_page_extent_head(struct page *page, unsigned long len)
  {
+       WARN_ON(!PagePrivate(page));
        set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
  }
  
@@@ -2821,9 -2822,17 +2822,17 @@@ int try_release_extent_state(struct ext
                 * at this point we can safely clear everything except the
                 * locked bit and the nodatasum bit
                 */
-               clear_extent_bit(tree, start, end,
+               ret = clear_extent_bit(tree, start, end,
                                 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
                                 0, 0, NULL, mask);
+               /* if clear_extent_bit failed for enomem reasons,
+                * we can't allow the release to continue.
+                */
+               if (ret < 0)
+                       ret = 0;
+               else
+                       ret = 1;
        }
        return ret;
  }
@@@ -3083,6 -3092,7 +3092,6 @@@ static struct extent_buffer *__alloc_ex
        eb->len = len;
        spin_lock_init(&eb->lock);
        init_waitqueue_head(&eb->lock_wq);
 -      INIT_RCU_HEAD(&eb->rcu_head);
  
  #if LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
@@@ -3194,7 -3204,13 +3203,13 @@@ struct extent_buffer *alloc_extent_buff
                }
                if (!PageUptodate(p))
                        uptodate = 0;
-               unlock_page(p);
+               /*
+                * see below about how we avoid a nasty race with release page
+                * and why we unlock later
+                */
+               if (i != 0)
+                       unlock_page(p);
        }
        if (uptodate)
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
        atomic_inc(&eb->refs);
        spin_unlock(&tree->buffer_lock);
        radix_tree_preload_end();
+       /*
+        * there is a race where release page may have
+        * tried to find this extent buffer in the radix
+        * but failed.  It will tell the VM it is safe to
+        * reclaim the, and it will clear the page private bit.
+        * We must make sure to set the page private bit properly
+        * after the extent buffer is in the radix tree so
+        * it doesn't get lost
+        */
+       set_page_extent_mapped(eb->first_page);
+       set_page_extent_head(eb->first_page, eb->len);
+       if (!page0)
+               unlock_page(eb->first_page);
        return eb;
  
  free_eb:
+       if (eb->first_page && !page0)
+               unlock_page(eb->first_page);
        if (!atomic_dec_and_test(&eb->refs))
                return exists;
        btrfs_release_extent_buffer(eb);
@@@ -3271,10 -3304,11 +3303,11 @@@ int clear_extent_buffer_dirty(struct ex
                        continue;
  
                lock_page(page);
+               WARN_ON(!PagePrivate(page));
+               set_page_extent_mapped(page);
                if (i == 0)
                        set_page_extent_head(page, eb->len);
-               else
-                       set_page_private(page, EXTENT_PAGE_PRIVATE);
  
                clear_page_dirty_for_io(page);
                spin_lock_irq(&page->mapping->tree_lock);
@@@ -3464,6 -3498,13 +3497,13 @@@ int read_extent_buffer_pages(struct ext
  
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
+               WARN_ON(!PagePrivate(page));
+               set_page_extent_mapped(page);
+               if (i == 0)
+                       set_page_extent_head(page, eb->len);
                if (inc_all_pages)
                        page_cache_get(page);
                if (!PageUptodate(page)) {
diff --combined fs/btrfs/file.c
@@@ -24,7 -24,6 +24,7 @@@
  #include <linux/string.h>
  #include <linux/backing-dev.h>
  #include <linux/mpage.h>
 +#include <linux/falloc.h>
  #include <linux/swap.h>
  #include <linux/writeback.h>
  #include <linux/statfs.h>
@@@ -186,6 -185,7 +186,7 @@@ int btrfs_drop_extent_cache(struct inod
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!split || !split2);
  
                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
@@@ -1259,117 -1259,6 +1260,117 @@@ static int btrfs_file_mmap(struct file      
        return 0;
  }
  
 +static long btrfs_fallocate(struct file *file, int mode,
 +                          loff_t offset, loff_t len)
 +{
 +      struct inode *inode = file->f_path.dentry->d_inode;
 +      struct extent_state *cached_state = NULL;
 +      u64 cur_offset;
 +      u64 last_byte;
 +      u64 alloc_start;
 +      u64 alloc_end;
 +      u64 alloc_hint = 0;
 +      u64 locked_end;
 +      u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
 +      struct extent_map *em;
 +      int ret;
 +
 +      alloc_start = offset & ~mask;
 +      alloc_end =  (offset + len + mask) & ~mask;
 +
 +      /* We only support the FALLOC_FL_KEEP_SIZE mode */
 +      if (mode & ~FALLOC_FL_KEEP_SIZE)
 +              return -EOPNOTSUPP;
 +
 +      /*
 +       * wait for ordered IO before we have any locks.  We'll loop again
 +       * below with the locks held.
 +       */
 +      btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
 +
 +      mutex_lock(&inode->i_mutex);
 +      ret = inode_newsize_ok(inode, alloc_end);
 +      if (ret)
 +              goto out;
 +
 +      if (alloc_start > inode->i_size) {
 +              ret = btrfs_cont_expand(inode, alloc_start);
 +              if (ret)
 +                      goto out;
 +      }
 +
 +      ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
 +      if (ret)
 +              goto out;
 +
 +      locked_end = alloc_end - 1;
 +      while (1) {
 +              struct btrfs_ordered_extent *ordered;
 +
 +              /* the extent lock is ordered inside the running
 +               * transaction
 +               */
 +              lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
 +                               locked_end, 0, &cached_state, GFP_NOFS);
 +              ordered = btrfs_lookup_first_ordered_extent(inode,
 +                                                          alloc_end - 1);
 +              if (ordered &&
 +                  ordered->file_offset + ordered->len > alloc_start &&
 +                  ordered->file_offset < alloc_end) {
 +                      btrfs_put_ordered_extent(ordered);
 +                      unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 +                                           alloc_start, locked_end,
 +                                           &cached_state, GFP_NOFS);
 +                      /*
 +                       * we can't wait on the range with the transaction
 +                       * running or with the extent lock held
 +                       */
 +                      btrfs_wait_ordered_range(inode, alloc_start,
 +                                               alloc_end - alloc_start);
 +              } else {
 +                      if (ordered)
 +                              btrfs_put_ordered_extent(ordered);
 +                      break;
 +              }
 +      }
 +
 +      cur_offset = alloc_start;
 +      while (1) {
 +              em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 +                                    alloc_end - cur_offset, 0);
 +              BUG_ON(IS_ERR(em) || !em);
 +              last_byte = min(extent_map_end(em), alloc_end);
 +              last_byte = (last_byte + mask) & ~mask;
 +              if (em->block_start == EXTENT_MAP_HOLE ||
 +                  (cur_offset >= inode->i_size &&
 +                   !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
 +                      ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 +                                                      last_byte - cur_offset,
 +                                                      1 << inode->i_blkbits,
 +                                                      offset + len,
 +                                                      &alloc_hint);
 +                      if (ret < 0) {
 +                              free_extent_map(em);
 +                              break;
 +                      }
 +              }
 +              free_extent_map(em);
 +
 +              cur_offset = last_byte;
 +              if (cur_offset >= alloc_end) {
 +                      ret = 0;
 +                      break;
 +              }
 +      }
 +      unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
 +                           &cached_state, GFP_NOFS);
 +
 +      btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 +out:
 +      mutex_unlock(&inode->i_mutex);
 +      return ret;
 +}
 +
  const struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
 +      .fallocate      = btrfs_fallocate,
        .unlocked_ioctl = btrfs_ioctl,
  #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
diff --combined fs/btrfs/inode.c
@@@ -644,6 -644,7 +644,7 @@@ retry
                                        async_extent->ram_size - 1, 0);
  
                em = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!em);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
@@@ -820,6 -821,7 +821,7 @@@ static noinline int cow_file_range(stru
                BUG_ON(ret);
  
                em = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!em);
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
@@@ -1169,6 -1171,7 +1171,7 @@@ out_check
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map(GFP_NOFS);
+                       BUG_ON(!em);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
@@@ -3902,7 -3905,7 +3905,7 @@@ again
        p = &root->inode_tree.rb_node;
        parent = NULL;
  
 -      if (hlist_unhashed(&inode->i_hash))
 +      if (inode_unhashed(inode))
                return;
  
        spin_lock(&root->inode_lock);
@@@ -4109,6 -4112,8 +4112,6 @@@ struct inode *btrfs_lookup_dentry(struc
        int index;
        int ret;
  
 -      dentry->d_op = &btrfs_dentry_operations;
 -
        if (dentry->d_name.len > BTRFS_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
  
        return inode;
  }
  
 -static int btrfs_dentry_delete(struct dentry *dentry)
 +static int btrfs_dentry_delete(const struct dentry *dentry)
  {
        struct btrfs_root *root;
  
@@@ -4828,7 -4833,7 +4831,7 @@@ static int btrfs_link(struct dentry *ol
        }
  
        btrfs_set_trans_block_group(trans, dir);
 -      atomic_inc(&inode->i_count);
 +      ihold(inode);
  
        err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
  
@@@ -6528,13 -6533,6 +6531,13 @@@ struct inode *btrfs_alloc_inode(struct 
        return inode;
  }
  
 +static void btrfs_i_callback(struct rcu_head *head)
 +{
 +      struct inode *inode = container_of(head, struct inode, i_rcu);
 +      INIT_LIST_HEAD(&inode->i_dentry);
 +      kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 +}
 +
  void btrfs_destroy_inode(struct inode *inode)
  {
        struct btrfs_ordered_extent *ordered;
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
  free:
 -      kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 +      call_rcu(&inode->i_rcu, btrfs_i_callback);
  }
  
  int btrfs_drop_inode(struct inode *inode)
@@@ -7133,12 -7131,118 +7136,12 @@@ int btrfs_prealloc_file_range_trans(str
                                           min_size, actual_len, alloc_hint, trans);
  }
  
 -static long btrfs_fallocate(struct inode *inode, int mode,
 -                          loff_t offset, loff_t len)
 -{
 -      struct extent_state *cached_state = NULL;
 -      u64 cur_offset;
 -      u64 last_byte;
 -      u64 alloc_start;
 -      u64 alloc_end;
 -      u64 alloc_hint = 0;
 -      u64 locked_end;
 -      u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
 -      struct extent_map *em;
 -      int ret;
 -
 -      alloc_start = offset & ~mask;
 -      alloc_end =  (offset + len + mask) & ~mask;
 -
 -      /*
 -       * wait for ordered IO before we have any locks.  We'll loop again
 -       * below with the locks held.
 -       */
 -      btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
 -
 -      mutex_lock(&inode->i_mutex);
 -      ret = inode_newsize_ok(inode, alloc_end);
 -      if (ret)
 -              goto out;
 -
 -      if (alloc_start > inode->i_size) {
 -              ret = btrfs_cont_expand(inode, alloc_start);
 -              if (ret)
 -                      goto out;
 -      }
 -
 -      ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
 -      if (ret)
 -              goto out;
 -
 -      locked_end = alloc_end - 1;
 -      while (1) {
 -              struct btrfs_ordered_extent *ordered;
 -
 -              /* the extent lock is ordered inside the running
 -               * transaction
 -               */
 -              lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
 -                               locked_end, 0, &cached_state, GFP_NOFS);
 -              ordered = btrfs_lookup_first_ordered_extent(inode,
 -                                                          alloc_end - 1);
 -              if (ordered &&
 -                  ordered->file_offset + ordered->len > alloc_start &&
 -                  ordered->file_offset < alloc_end) {
 -                      btrfs_put_ordered_extent(ordered);
 -                      unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 -                                           alloc_start, locked_end,
 -                                           &cached_state, GFP_NOFS);
 -                      /*
 -                       * we can't wait on the range with the transaction
 -                       * running or with the extent lock held
 -                       */
 -                      btrfs_wait_ordered_range(inode, alloc_start,
 -                                               alloc_end - alloc_start);
 -              } else {
 -                      if (ordered)
 -                              btrfs_put_ordered_extent(ordered);
 -                      break;
 -              }
 -      }
 -
 -      cur_offset = alloc_start;
 -      while (1) {
 -              em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 -                                    alloc_end - cur_offset, 0);
 -              BUG_ON(IS_ERR(em) || !em);
 -              last_byte = min(extent_map_end(em), alloc_end);
 -              last_byte = (last_byte + mask) & ~mask;
 -              if (em->block_start == EXTENT_MAP_HOLE ||
 -                  (cur_offset >= inode->i_size &&
 -                   !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
 -                      ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 -                                                      last_byte - cur_offset,
 -                                                      1 << inode->i_blkbits,
 -                                                      offset + len,
 -                                                      &alloc_hint);
 -                      if (ret < 0) {
 -                              free_extent_map(em);
 -                              break;
 -                      }
 -              }
 -              free_extent_map(em);
 -
 -              cur_offset = last_byte;
 -              if (cur_offset >= alloc_end) {
 -                      ret = 0;
 -                      break;
 -              }
 -      }
 -      unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
 -                           &cached_state, GFP_NOFS);
 -
 -      btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 -out:
 -      mutex_unlock(&inode->i_mutex);
 -      return ret;
 -}
 -
  static int btrfs_set_page_dirty(struct page *page)
  {
        return __set_page_dirty_nobuffers(page);
  }
  
 -static int btrfs_permission(struct inode *inode, int mask)
 +static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
  {
        struct btrfs_root *root = BTRFS_I(inode)->root;
  
                return -EROFS;
        if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
                return -EACCES;
 -      return generic_permission(inode, mask, btrfs_check_acl);
 +      return generic_permission(inode, mask, flags, btrfs_check_acl);
  }
  
  static const struct inode_operations btrfs_dir_inode_operations = {
@@@ -7239,6 -7343,7 +7242,6 @@@ static const struct inode_operations bt
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
 -      .fallocate      = btrfs_fallocate,
        .fiemap         = btrfs_fiemap,
  };
  static const struct inode_operations btrfs_special_inode_operations = {
diff --combined fs/btrfs/volumes.c
@@@ -399,6 -399,7 +399,6 @@@ static noinline int device_list_add(con
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
 -              device->barriers = 1;
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
@@@ -466,6 -467,7 +466,6 @@@ static struct btrfs_fs_devices *clone_f
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
 -              device->barriers = 1;
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);
@@@ -494,7 -496,7 +494,7 @@@ again
                        continue;
  
                if (device->bdev) {
 -                      close_bdev_exclusive(device->bdev, device->mode);
 +                      blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
@@@ -528,7 -530,7 +528,7 @@@ static int __btrfs_close_devices(struc
  
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (device->bdev) {
 -                      close_bdev_exclusive(device->bdev, device->mode);
 +                      blkdev_put(device->bdev, device->mode);
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
@@@ -585,15 -587,13 +585,15 @@@ static int __btrfs_open_devices(struct 
        int seeding = 1;
        int ret = 0;
  
 +      flags |= FMODE_EXCL;
 +
        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;
  
 -              bdev = open_bdev_exclusive(device->name, flags, holder);
 +              bdev = blkdev_get_by_path(device->name, flags, holder);
                if (IS_ERR(bdev)) {
                        printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
  error_brelse:
                brelse(bh);
  error_close:
 -              close_bdev_exclusive(bdev, FMODE_READ);
 +              blkdev_put(bdev, flags);
  error:
                continue;
        }
@@@ -693,8 -693,7 +693,8 @@@ int btrfs_scan_one_device(const char *p
  
        mutex_lock(&uuid_mutex);
  
 -      bdev = open_bdev_exclusive(path, flags, holder);
 +      flags |= FMODE_EXCL;
 +      bdev = blkdev_get_by_path(path, flags, holder);
  
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
  
        brelse(bh);
  error_close:
 -      close_bdev_exclusive(bdev, flags);
 +      blkdev_put(bdev, flags);
  error:
        mutex_unlock(&uuid_mutex);
        return ret;
@@@ -1300,8 -1299,8 +1300,8 @@@ int btrfs_rm_device(struct btrfs_root *
                        goto out;
                }
        } else {
 -              bdev = open_bdev_exclusive(device_path, FMODE_READ,
 -                                    root->fs_info->bdev_holder);
 +              bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
 +                                        root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
  
        if (device->bdev) {
 -              close_bdev_exclusive(device->bdev, device->mode);
 +              blkdev_put(device->bdev, device->mode);
                device->bdev = NULL;
                device->fs_devices->open_devices--;
        }
@@@ -1411,7 -1410,7 +1411,7 @@@ error_brelse
        brelse(bh);
  error_close:
        if (bdev)
 -              close_bdev_exclusive(bdev, FMODE_READ);
 +              blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
  out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
@@@ -1563,8 -1562,7 +1563,8 @@@ int btrfs_init_new_device(struct btrfs_
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
  
 -      bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
 +      bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
 +                                root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
  
  
        ret = find_next_devid(root, &device->devid);
        if (ret) {
+               kfree(device->name);
                kfree(device);
                goto error;
        }
  
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
+               kfree(device->name);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
  
        lock_chunks(root);
  
 -      device->barriers = 1;
        device->writeable = 1;
        device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
@@@ -1696,7 -1697,7 +1698,7 @@@ out
        mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
  error:
 -      close_bdev_exclusive(bdev, 0);
 +      blkdev_put(bdev, FMODE_EXCL);
        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
@@@ -3394,6 -3395,7 +3396,6 @@@ static struct btrfs_device *add_missing
                return NULL;
        list_add(&device->dev_list,
                 &fs_devices->devices);
 -      device->barriers = 1;
        device->dev_root = root->fs_info->dev_root;
        device->devid = devid;
        device->work.func = pending_bios_fn;