Btrfs: allow unaligned DIO
[pandora-kernel.git] / fs/btrfs/inode.c
index 02bb099..6866c36 100644
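
For context, since the page shows only the raw hunks: the title refers to direct I/O requests whose file offset or length is not sector-aligned. A minimal, hedged userspace sketch of such a request follows; the path, offset, and buffer size are purely illustrative, and on kernels without this support the write simply fails with EINVAL.

/* illustrative only: an O_DIRECT write that is not sector-aligned */
#define _GNU_SOURCE             /* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/btrfs/testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        char buf[1000];
        memset(buf, 'x', sizeof(buf));

        /* neither the offset (3) nor the length (1000) is a sector multiple */
        ssize_t ret = pwrite(fd, buf, sizeof(buf), 3);
        if (ret < 0)
                perror("pwrite");       /* EINVAL without unaligned-DIO support */
        else
                printf("wrote %zd bytes\n", ret);

        close(fd);
        return 0;
}
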
@@ -36,6 +36,7 @@
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/falloc.h>
+#include <linux/slab.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -251,6 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                                   inline_len, compressed_size,
                                   compressed_pages);
        BUG_ON(ret);
+       btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
 }
@@ -413,6 +415,7 @@ again:
                trans = btrfs_join_transaction(root, 1);
                BUG_ON(!trans);
                btrfs_set_trans_block_group(trans, inode);
+               trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
                /* lets try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
@@ -438,7 +441,6 @@ again:
                             start, end, NULL,
                             EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
                             EXTENT_CLEAR_DELALLOC |
-                            EXTENT_CLEAR_ACCOUNTING |
                             EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
 
                        btrfs_end_transaction(trans, root);
@@ -696,6 +698,38 @@ retry:
        return 0;
 }
 
+static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
+                                     u64 num_bytes)
+{
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct extent_map *em;
+       u64 alloc_hint = 0;
+
+       read_lock(&em_tree->lock);
+       em = search_extent_mapping(em_tree, start, num_bytes);
+       if (em) {
+               /*
+                * if block start isn't an actual block number then find the
+                * first block in this inode and use that as a hint.  If that
+                * block is also bogus then just don't worry about it.
+                */
+               if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+                       free_extent_map(em);
+                       em = search_extent_mapping(em_tree, 0, 0);
+                       if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+                               alloc_hint = em->block_start;
+                       if (em)
+                               free_extent_map(em);
+               } else {
+                       alloc_hint = em->block_start;
+                       free_extent_map(em);
+               }
+       }
+       read_unlock(&em_tree->lock);
+
+       return alloc_hint;
+}
+
 /*
  * when extent_io.c finds a delayed allocation range in the file,
  * the call backs end up in this code.  The basic idea is to
@@ -733,6 +767,7 @@ static noinline int cow_file_range(struct inode *inode,
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);
        btrfs_set_trans_block_group(trans, inode);
+       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        actual_end = min_t(u64, isize, end + 1);
 
@@ -752,7 +787,6 @@ static noinline int cow_file_range(struct inode *inode,
                                     EXTENT_CLEAR_UNLOCK_PAGE |
                                     EXTENT_CLEAR_UNLOCK |
                                     EXTENT_CLEAR_DELALLOC |
-                                    EXTENT_CLEAR_ACCOUNTING |
                                     EXTENT_CLEAR_DIRTY |
                                     EXTENT_SET_WRITEBACK |
                                     EXTENT_END_WRITEBACK);
@@ -768,35 +802,13 @@ static noinline int cow_file_range(struct inode *inode,
        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(&root->fs_info->super_copy));
 
-
-       read_lock(&BTRFS_I(inode)->extent_tree.lock);
-       em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
-                                  start, num_bytes);
-       if (em) {
-               /*
-                * if block start isn't an actual block number then find the
-                * first block in this inode and use that as a hint.  If that
-                * block is also bogus then just don't worry about it.
-                */
-               if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-                       free_extent_map(em);
-                       em = search_extent_mapping(em_tree, 0, 0);
-                       if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
-                               alloc_hint = em->block_start;
-                       if (em)
-                               free_extent_map(em);
-               } else {
-                       alloc_hint = em->block_start;
-                       free_extent_map(em);
-               }
-       }
-       read_unlock(&BTRFS_I(inode)->extent_tree.lock);
+       alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
 
        while (disk_num_bytes > 0) {
                unsigned long op;
 
-               cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
+               cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
@@ -1173,6 +1185,13 @@ out_check:
                                               num_bytes, num_bytes, type);
                BUG_ON(ret);
 
+               if (root->root_key.objectid ==
+                   BTRFS_DATA_RELOC_TREE_OBJECTID) {
+                       ret = btrfs_reloc_clone_csums(inode, cur_offset,
+                                                     num_bytes);
+                       BUG_ON(ret);
+               }
+
                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                cur_offset, cur_offset + num_bytes - 1,
                                locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
@@ -1225,36 +1244,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 }
 
 static int btrfs_split_extent_hook(struct inode *inode,
-                                   struct extent_state *orig, u64 split)
+                                  struct extent_state *orig, u64 split)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 size;
-
+       /* not delalloc, ignore it */
        if (!(orig->state & EXTENT_DELALLOC))
                return 0;
 
-       size = orig->end - orig->start + 1;
-       if (size > root->fs_info->max_extent) {
-               u64 num_extents;
-               u64 new_size;
-
-               new_size = orig->end - split + 1;
-               num_extents = div64_u64(size + root->fs_info->max_extent - 1,
-                                       root->fs_info->max_extent);
-
-               /*
-                * if we break a large extent up then leave oustanding_extents
-                * be, since we've already accounted for the large extent.
-                */
-               if (div64_u64(new_size + root->fs_info->max_extent - 1,
-                             root->fs_info->max_extent) < num_extents)
-                       return 0;
-       }
-
-       spin_lock(&BTRFS_I(inode)->accounting_lock);
-       BTRFS_I(inode)->outstanding_extents++;
-       spin_unlock(&BTRFS_I(inode)->accounting_lock);
-
+       atomic_inc(&BTRFS_I(inode)->outstanding_extents);
        return 0;
 }
 
@@ -1268,42 +1264,11 @@ static int btrfs_merge_extent_hook(struct inode *inode,
                                   struct extent_state *new,
                                   struct extent_state *other)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 new_size, old_size;
-       u64 num_extents;
-
        /* not delalloc, ignore it */
        if (!(other->state & EXTENT_DELALLOC))
                return 0;
 
-       old_size = other->end - other->start + 1;
-       if (new->start < other->start)
-               new_size = other->end - new->start + 1;
-       else
-               new_size = new->end - other->start + 1;
-
-       /* we're not bigger than the max, unreserve the space and go */
-       if (new_size <= root->fs_info->max_extent) {
-               spin_lock(&BTRFS_I(inode)->accounting_lock);
-               BTRFS_I(inode)->outstanding_extents--;
-               spin_unlock(&BTRFS_I(inode)->accounting_lock);
-               return 0;
-       }
-
-       /*
-        * If we grew by another max_extent, just return, we want to keep that
-        * reserved amount.
-        */
-       num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
-                               root->fs_info->max_extent);
-       if (div64_u64(new_size + root->fs_info->max_extent - 1,
-                     root->fs_info->max_extent) > num_extents)
-               return 0;
-
-       spin_lock(&BTRFS_I(inode)->accounting_lock);
-       BTRFS_I(inode)->outstanding_extents--;
-       spin_unlock(&BTRFS_I(inode)->accounting_lock);
-
+       atomic_dec(&BTRFS_I(inode)->outstanding_extents);
        return 0;
 }
 
@@ -1312,8 +1277,8 @@ static int btrfs_merge_extent_hook(struct inode *inode,
  * bytes in this file, and to maintain the list of inodes that
  * have pending delalloc work to be done.
  */
-static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
-                      unsigned long old, unsigned long bits)
+static int btrfs_set_bit_hook(struct inode *inode,
+                             struct extent_state *state, int *bits)
 {
 
        /*
@@ -1321,16 +1286,18 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
-       if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
+       if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
+               u64 len = state->end + 1 - state->start;
+
+               if (*bits & EXTENT_FIRST_DELALLOC)
+                       *bits &= ~EXTENT_FIRST_DELALLOC;
+               else
+                       atomic_inc(&BTRFS_I(inode)->outstanding_extents);
 
-               spin_lock(&BTRFS_I(inode)->accounting_lock);
-               BTRFS_I(inode)->outstanding_extents++;
-               spin_unlock(&BTRFS_I(inode)->accounting_lock);
-               btrfs_delalloc_reserve_space(root, inode, end - start + 1);
                spin_lock(&root->fs_info->delalloc_lock);
-               BTRFS_I(inode)->delalloc_bytes += end - start + 1;
-               root->fs_info->delalloc_bytes += end - start + 1;
+               BTRFS_I(inode)->delalloc_bytes += len;
+               root->fs_info->delalloc_bytes += len;
                if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
                                      &root->fs_info->delalloc_inodes);
@@ -1344,44 +1311,32 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
  * extent_io.c clear_bit_hook, see set_bit_hook for why
  */
 static int btrfs_clear_bit_hook(struct inode *inode,
-                               struct extent_state *state, unsigned long bits)
+                               struct extent_state *state, int *bits)
 {
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
-       if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
+       if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
+               u64 len = state->end + 1 - state->start;
 
-               if (bits & EXTENT_DO_ACCOUNTING) {
-                       spin_lock(&BTRFS_I(inode)->accounting_lock);
-                       BTRFS_I(inode)->outstanding_extents--;
-                       spin_unlock(&BTRFS_I(inode)->accounting_lock);
-                       btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
-               }
+               if (*bits & EXTENT_FIRST_DELALLOC)
+                       *bits &= ~EXTENT_FIRST_DELALLOC;
+               else if (!(*bits & EXTENT_DO_ACCOUNTING))
+                       atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+
+               if (*bits & EXTENT_DO_ACCOUNTING)
+                       btrfs_delalloc_release_metadata(inode, len);
+
+               if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
+                       btrfs_free_reserved_data_space(inode, len);
 
                spin_lock(&root->fs_info->delalloc_lock);
-               if (state->end - state->start + 1 >
-                   root->fs_info->delalloc_bytes) {
-                       printk(KERN_INFO "btrfs warning: delalloc account "
-                              "%llu %llu\n",
-                              (unsigned long long)
-                              state->end - state->start + 1,
-                              (unsigned long long)
-                              root->fs_info->delalloc_bytes);
-                       btrfs_delalloc_free_space(root, inode, (u64)-1);
-                       root->fs_info->delalloc_bytes = 0;
-                       BTRFS_I(inode)->delalloc_bytes = 0;
-               } else {
-                       btrfs_delalloc_free_space(root, inode,
-                                                 state->end -
-                                                 state->start + 1);
-                       root->fs_info->delalloc_bytes -= state->end -
-                               state->start + 1;
-                       BTRFS_I(inode)->delalloc_bytes -= state->end -
-                               state->start + 1;
-               }
+               root->fs_info->delalloc_bytes -= len;
+               BTRFS_I(inode)->delalloc_bytes -= len;
+
                if (BTRFS_I(inode)->delalloc_bytes == 0 &&
                    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_del_init(&BTRFS_I(inode)->delalloc_inodes);
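
A reading aid for the four hook changes above (split, merge, set_bit and clear_bit): each contiguous delalloc range is now counted as one unit in the atomic outstanding_extents counter, and the EXTENT_FIRST_DELALLOC bit appears to let a caller that has already accounted for the range skip the extra increment or decrement. A toy userspace model of that bookkeeping, using made-up names rather than the btrfs API:

/* toy model only: one unit per contiguous delalloc range */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int outstanding_extents;

static void range_created(void) { atomic_fetch_add(&outstanding_extents, 1); }
static void range_split(void)   { atomic_fetch_add(&outstanding_extents, 1); }
static void range_merged(void)  { atomic_fetch_sub(&outstanding_extents, 1); }
static void range_cleared(void) { atomic_fetch_sub(&outstanding_extents, 1); }

int main(void)
{
        range_created();   /* first dirty range in an inode */
        range_split();     /* the range is split in two */
        range_merged();    /* two adjacent ranges merge back */
        range_cleared();   /* the delalloc data is written out */
        printf("outstanding: %d\n", atomic_load(&outstanding_extents));
        return 0;
}
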
@@ -1430,7 +1385,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
  */
 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
-                                   unsigned long bio_flags)
+                                   unsigned long bio_flags,
+                                   u64 bio_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
@@ -1449,7 +1405,8 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
  * are inserted into the btree
  */
 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
-                         int mirror_num, unsigned long bio_flags)
+                         int mirror_num, unsigned long bio_flags,
+                         u64 bio_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        return btrfs_map_bio(root, rw, bio, mirror_num, 1);
@@ -1460,7 +1417,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
  * on write, or reading the csums from the tree before a read
  */
 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
-                         int mirror_num, unsigned long bio_flags)
+                         int mirror_num, unsigned long bio_flags,
+                         u64 bio_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
@@ -1485,7 +1443,8 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                /* we're doing a write, do the async checksumming */
                return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
-                                  bio_flags, __btrfs_submit_bio_start,
+                                  bio_flags, bio_offset,
+                                  __btrfs_submit_bio_start,
                                   __btrfs_submit_bio_done);
        }
 
@@ -1566,6 +1525,7 @@ again:
                goto again;
        }
 
+       BUG();
        btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
        ClearPageChecked(page);
 out:
@@ -1696,7 +1656,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_trans_handle *trans;
+       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_state *cached_state = NULL;
@@ -1714,9 +1674,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
                if (!ret) {
                        trans = btrfs_join_transaction(root, 1);
+                       btrfs_set_trans_block_group(trans, inode);
+                       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
-                       btrfs_end_transaction(trans, root);
                }
                goto out;
        }
@@ -1726,6 +1687,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                         0, &cached_state, GFP_NOFS);
 
        trans = btrfs_join_transaction(root, 1);
+       btrfs_set_trans_block_group(trans, inode);
+       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compressed = 1;
@@ -1757,12 +1720,13 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);
 
-       /* this also removes the ordered extent from the tree */
        btrfs_ordered_update_i_size(inode, 0, ordered_extent);
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
-       btrfs_end_transaction(trans, root);
 out:
+       btrfs_delalloc_release_metadata(inode, ordered_extent->len);
+       if (trans)
+               btrfs_end_transaction(trans, root);
        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
@@ -1884,7 +1848,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
 
        BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
                                                      failrec->last_mirror,
-                                                     failrec->bio_flags);
+                                                     failrec->bio_flags, 0);
        return 0;
 }
 
@@ -2038,33 +2002,197 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
        up_read(&root->fs_info->cleanup_work_sem);
 }
 
+/*
+ * calculate extra metadata reservation when snapshotting a subvolume
+ * that contains orphan files.
+ */
+void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
+                               struct btrfs_pending_snapshot *pending,
+                               u64 *bytes_to_reserve)
+{
+       struct btrfs_root *root;
+       struct btrfs_block_rsv *block_rsv;
+       u64 num_bytes;
+       int index;
+
+       root = pending->root;
+       if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
+               return;
+
+       block_rsv = root->orphan_block_rsv;
+
+       /* orphan block reservation for the snapshot */
+       num_bytes = block_rsv->size;
+
+       /*
+        * after the snapshot is created, COWing tree blocks may use more
+        * space than it frees. So we should make sure there is enough
+        * reserved space.
+        */
+       index = trans->transid & 0x1;
+       if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
+               num_bytes += block_rsv->size -
+                            (block_rsv->reserved + block_rsv->freed[index]);
+       }
+
+       *bytes_to_reserve += num_bytes;
+}
+
+void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
+                               struct btrfs_pending_snapshot *pending)
+{
+       struct btrfs_root *root = pending->root;
+       struct btrfs_root *snap = pending->snap;
+       struct btrfs_block_rsv *block_rsv;
+       u64 num_bytes;
+       int index;
+       int ret;
+
+       if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
+               return;
+
+       /* refill source subvolume's orphan block reservation */
+       block_rsv = root->orphan_block_rsv;
+       index = trans->transid & 0x1;
+       if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
+               num_bytes = block_rsv->size -
+                           (block_rsv->reserved + block_rsv->freed[index]);
+               ret = btrfs_block_rsv_migrate(&pending->block_rsv,
+                                             root->orphan_block_rsv,
+                                             num_bytes);
+               BUG_ON(ret);
+       }
+
+       /* setup orphan block reservation for the snapshot */
+       block_rsv = btrfs_alloc_block_rsv(snap);
+       BUG_ON(!block_rsv);
+
+       btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
+       snap->orphan_block_rsv = block_rsv;
+
+       num_bytes = root->orphan_block_rsv->size;
+       ret = btrfs_block_rsv_migrate(&pending->block_rsv,
+                                     block_rsv, num_bytes);
+       BUG_ON(ret);
+
+#if 0
+       /* insert orphan item for the snapshot */
+       WARN_ON(!root->orphan_item_inserted);
+       ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
+                                      snap->root_key.objectid);
+       BUG_ON(ret);
+       snap->orphan_item_inserted = 1;
+#endif
+}
+
+enum btrfs_orphan_cleanup_state {
+       ORPHAN_CLEANUP_STARTED  = 1,
+       ORPHAN_CLEANUP_DONE     = 2,
+};
+
+/*
+ * This is called at transaction commit time. If there are no orphan
+ * files in the subvolume, it removes the orphan item and frees the block_rsv
+ * structure.
+ */
+void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root)
+{
+       int ret;
+
+       if (!list_empty(&root->orphan_list) ||
+           root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
+               return;
+
+       if (root->orphan_item_inserted &&
+           btrfs_root_refs(&root->root_item) > 0) {
+               ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
+                                           root->root_key.objectid);
+               BUG_ON(ret);
+               root->orphan_item_inserted = 0;
+       }
+
+       if (root->orphan_block_rsv) {
+               WARN_ON(root->orphan_block_rsv->size > 0);
+               btrfs_free_block_rsv(root, root->orphan_block_rsv);
+               root->orphan_block_rsv = NULL;
+       }
+}
+
 /*
  * This creates an orphan entry for the given inode in case something goes
  * wrong in the middle of an unlink/truncate.
+ *
+ * NOTE: caller of this function should reserve 5 units of metadata for
+ *      this function.
  */
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       int ret = 0;
+       struct btrfs_block_rsv *block_rsv = NULL;
+       int reserve = 0;
+       int insert = 0;
+       int ret;
 
-       spin_lock(&root->list_lock);
+       if (!root->orphan_block_rsv) {
+               block_rsv = btrfs_alloc_block_rsv(root);
+               BUG_ON(!block_rsv);
+       }
 
-       /* already on the orphan list, we're good */
-       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               spin_unlock(&root->list_lock);
-               return 0;
+       spin_lock(&root->orphan_lock);
+       if (!root->orphan_block_rsv) {
+               root->orphan_block_rsv = block_rsv;
+       } else if (block_rsv) {
+               btrfs_free_block_rsv(root, block_rsv);
+               block_rsv = NULL;
        }
 
-       list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+       if (list_empty(&BTRFS_I(inode)->i_orphan)) {
+               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+#if 0
+               /*
+                * For proper ENOSPC handling, we should do orphan
+                * cleanup when mounting. But this introduces backward
+                * compatibility issue.
+                */
+               if (!xchg(&root->orphan_item_inserted, 1))
+                       insert = 2;
+               else
+                       insert = 1;
+#endif
+               insert = 1;
+       } else {
+               WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
+       }
 
-       spin_unlock(&root->list_lock);
+       if (!BTRFS_I(inode)->orphan_meta_reserved) {
+               BTRFS_I(inode)->orphan_meta_reserved = 1;
+               reserve = 1;
+       }
+       spin_unlock(&root->orphan_lock);
 
-       /*
-        * insert an orphan item to track this unlinked/truncated file
-        */
-       ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+       if (block_rsv)
+               btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
 
-       return ret;
+       /* grab metadata reservation from transaction handle */
+       if (reserve) {
+               ret = btrfs_orphan_reserve_metadata(trans, inode);
+               BUG_ON(ret);
+       }
+
+       /* insert an orphan item to track this unlinked/truncated file */
+       if (insert >= 1) {
+               ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+               BUG_ON(ret);
+       }
+
+       /* insert an orphan item to track subvolume contains orphan files */
+       if (insert >= 2) {
+               ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
+                                              root->root_key.objectid);
+               BUG_ON(ret);
+       }
+       return 0;
 }
 
 /*
@@ -2074,26 +2202,31 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       int delete_item = 0;
+       int release_rsv = 0;
        int ret = 0;
 
-       spin_lock(&root->list_lock);
-
-       if (list_empty(&BTRFS_I(inode)->i_orphan)) {
-               spin_unlock(&root->list_lock);
-               return 0;
+       spin_lock(&root->orphan_lock);
+       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+               list_del_init(&BTRFS_I(inode)->i_orphan);
+               delete_item = 1;
        }
 
-       list_del_init(&BTRFS_I(inode)->i_orphan);
-       if (!trans) {
-               spin_unlock(&root->list_lock);
-               return 0;
+       if (BTRFS_I(inode)->orphan_meta_reserved) {
+               BTRFS_I(inode)->orphan_meta_reserved = 0;
+               release_rsv = 1;
        }
+       spin_unlock(&root->orphan_lock);
 
-       spin_unlock(&root->list_lock);
+       if (trans && delete_item) {
+               ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+               BUG_ON(ret);
+       }
 
-       ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+       if (release_rsv)
+               btrfs_orphan_release_metadata(inode);
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -2110,7 +2243,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
        struct inode *inode;
        int ret = 0, nr_unlink = 0, nr_truncate = 0;
 
-       if (!xchg(&root->clean_orphans, 0))
+       if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
                return;
 
        path = btrfs_alloc_path();
@@ -2163,16 +2296,15 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
                found_key.type = BTRFS_INODE_ITEM_KEY;
                found_key.offset = 0;
                inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
-               if (IS_ERR(inode))
-                       break;
+               BUG_ON(IS_ERR(inode));
 
                /*
                 * add this inode to the orphan list so btrfs_orphan_del does
                 * the proper thing when we hit it
                 */
-               spin_lock(&root->list_lock);
+               spin_lock(&root->orphan_lock);
                list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
-               spin_unlock(&root->list_lock);
+               spin_unlock(&root->orphan_lock);
 
                /*
                 * if this is a bad inode, means we actually succeeded in
@@ -2181,7 +2313,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
                 * do a destroy_inode
                 */
                if (is_bad_inode(inode)) {
-                       trans = btrfs_start_transaction(root, 1);
+                       trans = btrfs_start_transaction(root, 0);
                        btrfs_orphan_del(trans, inode);
                        btrfs_end_transaction(trans, root);
                        iput(inode);
@@ -2199,13 +2331,23 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
                /* this will do delete_inode and everything for us */
                iput(inode);
        }
+       btrfs_free_path(path);
+
+       root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
+
+       if (root->orphan_block_rsv)
+               btrfs_block_rsv_release(root, root->orphan_block_rsv,
+                                       (u64)-1);
+
+       if (root->orphan_block_rsv || root->orphan_item_inserted) {
+               trans = btrfs_join_transaction(root, 1);
+               btrfs_end_transaction(trans, root);
+       }
 
        if (nr_unlink)
                printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
        if (nr_truncate)
                printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
-
-       btrfs_free_path(path);
 }
 
 /*
@@ -2524,85 +2666,258 @@ out:
        return ret;
 }
 
-static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
+/* helper to check if there is any shared block in the path */
+static int check_path_shared(struct btrfs_root *root,
+                            struct btrfs_path *path)
 {
-       struct btrfs_root *root;
-       struct btrfs_trans_handle *trans;
-       struct inode *inode = dentry->d_inode;
+       struct extent_buffer *eb;
+       int level;
        int ret;
-       unsigned long nr = 0;
-
-       root = BTRFS_I(dir)->root;
-
-       /*
-        * 5 items for unlink inode
-        * 1 for orphan
-        */
-       ret = btrfs_reserve_metadata_space(root, 6);
-       if (ret)
-               return ret;
+       u64 refs;
 
-       trans = btrfs_start_transaction(root, 1);
-       if (IS_ERR(trans)) {
-               btrfs_unreserve_metadata_space(root, 6);
-               return PTR_ERR(trans);
+       for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+               if (!path->nodes[level])
+                       break;
+               eb = path->nodes[level];
+               if (!btrfs_block_can_be_shared(root, eb))
+                       continue;
+               ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
+                                              &refs, NULL);
+               if (refs > 1)
+                       return 1;
        }
-
-       btrfs_set_trans_block_group(trans, dir);
-
-       btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
-
-       ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
-                                dentry->d_name.name, dentry->d_name.len);
-
-       if (inode->i_nlink == 0)
-               ret = btrfs_orphan_add(trans, inode);
-
-       nr = trans->blocks_used;
-
-       btrfs_end_transaction_throttle(trans, root);
-       btrfs_unreserve_metadata_space(root, 6);
-       btrfs_btree_balance_dirty(root, nr);
-       return ret;
+       return 0;
 }
 
-int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root,
-                       struct inode *dir, u64 objectid,
-                       const char *name, int name_len)
+/*
+ * helper to start transaction for unlink and rmdir.
+ *
+ * unlink and rmdir are special in btrfs, they do not always free space.
+ * so in enospc case, we should make sure they will free space before
+ * allowing them to use the global metadata reservation.
+ */
+static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
+                                                      struct dentry *dentry)
 {
+       struct btrfs_trans_handle *trans;
+       struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_path *path;
-       struct extent_buffer *leaf;
+       struct btrfs_inode_ref *ref;
        struct btrfs_dir_item *di;
-       struct btrfs_key key;
+       struct inode *inode = dentry->d_inode;
        u64 index;
+       int check_link = 1;
+       int err = -ENOSPC;
        int ret;
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       trans = btrfs_start_transaction(root, 10);
+       if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
+               return trans;
 
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
-                                  name, name_len, -1);
-       BUG_ON(!di || IS_ERR(di));
+       if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+               return ERR_PTR(-ENOSPC);
 
-       leaf = path->nodes[0];
-       btrfs_dir_item_key_to_cpu(leaf, di, &key);
-       WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
-       ret = btrfs_delete_one_dir_name(trans, root, path, di);
-       BUG_ON(ret);
-       btrfs_release_path(root, path);
+       /* check if there is someone else holds reference */
+       if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
+               return ERR_PTR(-ENOSPC);
 
-       ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
-                                objectid, root->root_key.objectid,
-                                dir->i_ino, &index, name, name_len);
-       if (ret < 0) {
-               BUG_ON(ret != -ENOENT);
-               di = btrfs_search_dir_index_item(root, path, dir->i_ino,
-                                                name, name_len);
-               BUG_ON(!di || IS_ERR(di));
+       if (atomic_read(&inode->i_count) > 2)
+               return ERR_PTR(-ENOSPC);
 
-               leaf = path->nodes[0];
+       if (xchg(&root->fs_info->enospc_unlink, 1))
+               return ERR_PTR(-ENOSPC);
+
+       path = btrfs_alloc_path();
+       if (!path) {
+               root->fs_info->enospc_unlink = 0;
+               return ERR_PTR(-ENOMEM);
+       }
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               root->fs_info->enospc_unlink = 0;
+               return trans;
+       }
+
+       path->skip_locking = 1;
+       path->search_commit_root = 1;
+
+       ret = btrfs_lookup_inode(trans, root, path,
+                               &BTRFS_I(dir)->location, 0);
+       if (ret < 0) {
+               err = ret;
+               goto out;
+       }
+       if (ret == 0) {
+               if (check_path_shared(root, path))
+                       goto out;
+       } else {
+               check_link = 0;
+       }
+       btrfs_release_path(root, path);
+
+       ret = btrfs_lookup_inode(trans, root, path,
+                               &BTRFS_I(inode)->location, 0);
+       if (ret < 0) {
+               err = ret;
+               goto out;
+       }
+       if (ret == 0) {
+               if (check_path_shared(root, path))
+                       goto out;
+       } else {
+               check_link = 0;
+       }
+       btrfs_release_path(root, path);
+
+       if (ret == 0 && S_ISREG(inode->i_mode)) {
+               ret = btrfs_lookup_file_extent(trans, root, path,
+                                              inode->i_ino, (u64)-1, 0);
+               if (ret < 0) {
+                       err = ret;
+                       goto out;
+               }
+               BUG_ON(ret == 0);
+               if (check_path_shared(root, path))
+                       goto out;
+               btrfs_release_path(root, path);
+       }
+
+       if (!check_link) {
+               err = 0;
+               goto out;
+       }
+
+       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+                               dentry->d_name.name, dentry->d_name.len, 0);
+       if (IS_ERR(di)) {
+               err = PTR_ERR(di);
+               goto out;
+       }
+       if (di) {
+               if (check_path_shared(root, path))
+                       goto out;
+       } else {
+               err = 0;
+               goto out;
+       }
+       btrfs_release_path(root, path);
+
+       ref = btrfs_lookup_inode_ref(trans, root, path,
+                               dentry->d_name.name, dentry->d_name.len,
+                               inode->i_ino, dir->i_ino, 0);
+       if (IS_ERR(ref)) {
+               err = PTR_ERR(ref);
+               goto out;
+       }
+       BUG_ON(!ref);
+       if (check_path_shared(root, path))
+               goto out;
+       index = btrfs_inode_ref_index(path->nodes[0], ref);
+       btrfs_release_path(root, path);
+
+       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
+                               dentry->d_name.name, dentry->d_name.len, 0);
+       if (IS_ERR(di)) {
+               err = PTR_ERR(di);
+               goto out;
+       }
+       BUG_ON(ret == -ENOENT);
+       if (check_path_shared(root, path))
+               goto out;
+
+       err = 0;
+out:
+       btrfs_free_path(path);
+       if (err) {
+               btrfs_end_transaction(trans, root);
+               root->fs_info->enospc_unlink = 0;
+               return ERR_PTR(err);
+       }
+
+       trans->block_rsv = &root->fs_info->global_block_rsv;
+       return trans;
+}
+
+static void __unlink_end_trans(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root)
+{
+       if (trans->block_rsv == &root->fs_info->global_block_rsv) {
+               BUG_ON(!root->fs_info->enospc_unlink);
+               root->fs_info->enospc_unlink = 0;
+       }
+       btrfs_end_transaction_throttle(trans, root);
+}
+
+static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       struct btrfs_root *root = BTRFS_I(dir)->root;
+       struct btrfs_trans_handle *trans;
+       struct inode *inode = dentry->d_inode;
+       int ret;
+       unsigned long nr = 0;
+
+       trans = __unlink_start_trans(dir, dentry);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
+
+       btrfs_set_trans_block_group(trans, dir);
+
+       btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
+
+       ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
+                                dentry->d_name.name, dentry->d_name.len);
+       BUG_ON(ret);
+
+       if (inode->i_nlink == 0) {
+               ret = btrfs_orphan_add(trans, inode);
+               BUG_ON(ret);
+       }
+
+       nr = trans->blocks_used;
+       __unlink_end_trans(trans, root);
+       btrfs_btree_balance_dirty(root, nr);
+       return ret;
+}
+
+int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+                       struct btrfs_root *root,
+                       struct inode *dir, u64 objectid,
+                       const char *name, int name_len)
+{
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_dir_item *di;
+       struct btrfs_key key;
+       u64 index;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+                                  name, name_len, -1);
+       BUG_ON(!di || IS_ERR(di));
+
+       leaf = path->nodes[0];
+       btrfs_dir_item_key_to_cpu(leaf, di, &key);
+       WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
+       ret = btrfs_delete_one_dir_name(trans, root, path, di);
+       BUG_ON(ret);
+       btrfs_release_path(root, path);
+
+       ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
+                                objectid, root->root_key.objectid,
+                                dir->i_ino, &index, name, name_len);
+       if (ret < 0) {
+               BUG_ON(ret != -ENOENT);
+               di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+                                                name, name_len);
+               BUG_ON(!di || IS_ERR(di));
+
+               leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                btrfs_release_path(root, path);
                index = key.offset;
@@ -2633,7 +2948,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        struct inode *inode = dentry->d_inode;
        int err = 0;
-       int ret;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_trans_handle *trans;
        unsigned long nr = 0;
@@ -2642,15 +2956,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
            inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                return -ENOTEMPTY;
 
-       ret = btrfs_reserve_metadata_space(root, 5);
-       if (ret)
-               return ret;
-
-       trans = btrfs_start_transaction(root, 1);
-       if (IS_ERR(trans)) {
-               btrfs_unreserve_metadata_space(root, 5);
+       trans = __unlink_start_trans(dir, dentry);
+       if (IS_ERR(trans))
                return PTR_ERR(trans);
-       }
 
        btrfs_set_trans_block_group(trans, dir);
 
@@ -2673,12 +2981,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
                btrfs_i_size_write(inode, 0);
 out:
        nr = trans->blocks_used;
-       ret = btrfs_end_transaction_throttle(trans, root);
-       btrfs_unreserve_metadata_space(root, 5);
+       __unlink_end_trans(trans, root);
        btrfs_btree_balance_dirty(root, nr);
 
-       if (ret && !err)
-               err = ret;
        return err;
 }
 
@@ -3075,6 +3380,7 @@ out:
        if (pending_del_nr) {
                ret = btrfs_del_items(trans, root, path, pending_del_slot,
                                      pending_del_nr);
+               BUG_ON(ret);
        }
        btrfs_free_path(path);
        return err;
@@ -3102,11 +3408,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 
        if ((offset & (blocksize - 1)) == 0)
                goto out;
-       ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
-       if (ret)
-               goto out;
-
-       ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
+       ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
        if (ret)
                goto out;
 
@@ -3114,8 +3416,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 again:
        page = grab_cache_page(mapping, index);
        if (!page) {
-               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
-               btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
+               btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
                goto out;
        }
 
@@ -3178,8 +3479,7 @@ again:
 
 out_unlock:
        if (ret)
-               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
-       btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
+               btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
        unlock_page(page);
        page_cache_release(page);
 out:
@@ -3191,7 +3491,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       struct extent_map *em;
+       struct extent_map *em = NULL;
        struct extent_state *cached_state = NULL;
        u64 mask = root->sectorsize - 1;
        u64 hole_start = (inode->i_size + mask) & ~mask;
@@ -3229,11 +3529,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
 
-                       err = btrfs_reserve_metadata_space(root, 2);
-                       if (err)
+                       trans = btrfs_start_transaction(root, 2);
+                       if (IS_ERR(trans)) {
+                               err = PTR_ERR(trans);
                                break;
-
-                       trans = btrfs_start_transaction(root, 1);
+                       }
                        btrfs_set_trans_block_group(trans, inode);
 
                        err = btrfs_drop_extents(trans, inode, cur_offset,
@@ -3251,14 +3551,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                                        last_byte - 1, 0);
 
                        btrfs_end_transaction(trans, root);
-                       btrfs_unreserve_metadata_space(root, 2);
                }
                free_extent_map(em);
+               em = NULL;
                cur_offset = last_byte;
                if (cur_offset >= block_end)
                        break;
        }
 
+       free_extent_map(em);
        unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
                             GFP_NOFS);
        return err;
@@ -3285,11 +3586,10 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
                }
        }
 
-       ret = btrfs_reserve_metadata_space(root, 1);
-       if (ret)
-               return ret;
+       trans = btrfs_start_transaction(root, 5);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
-       trans = btrfs_start_transaction(root, 1);
        btrfs_set_trans_block_group(trans, inode);
 
        ret = btrfs_orphan_add(trans, inode);
@@ -3297,7 +3597,6 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
 
        nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
-       btrfs_unreserve_metadata_space(root, 1);
        btrfs_btree_balance_dirty(root, nr);
 
        if (attr->ia_size > inode->i_size) {
@@ -3310,8 +3609,11 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
                i_size_write(inode, attr->ia_size);
                btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
 
-               trans = btrfs_start_transaction(root, 1);
+               trans = btrfs_start_transaction(root, 0);
+               BUG_ON(IS_ERR(trans));
                btrfs_set_trans_block_group(trans, inode);
+               trans->block_rsv = root->orphan_block_rsv;
+               BUG_ON(!trans->block_rsv);
 
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
@@ -3391,10 +3693,21 @@ void btrfs_delete_inode(struct inode *inode)
        btrfs_i_size_write(inode, 0);
 
        while (1) {
-               trans = btrfs_start_transaction(root, 1);
+               trans = btrfs_start_transaction(root, 0);
+               BUG_ON(IS_ERR(trans));
                btrfs_set_trans_block_group(trans, inode);
-               ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
+               trans->block_rsv = root->orphan_block_rsv;
+
+               ret = btrfs_block_rsv_check(trans, root,
+                                           root->orphan_block_rsv, 0, 5);
+               if (ret) {
+                       BUG_ON(ret != -EAGAIN);
+                       ret = btrfs_commit_transaction(trans, root);
+                       BUG_ON(ret);
+                       continue;
+               }
 
+               ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
                if (ret != -EAGAIN)
                        break;
 
@@ -3402,6 +3715,7 @@ void btrfs_delete_inode(struct inode *inode)
                btrfs_end_transaction(trans, root);
                trans = NULL;
                btrfs_btree_balance_dirty(root, nr);
+
        }
 
        if (ret == 0) {
@@ -3642,40 +3956,10 @@ again:
        return 0;
 }
 
-static noinline void init_btrfs_i(struct inode *inode)
-{
-       struct btrfs_inode *bi = BTRFS_I(inode);
-
-       bi->generation = 0;
-       bi->sequence = 0;
-       bi->last_trans = 0;
-       bi->last_sub_trans = 0;
-       bi->logged_trans = 0;
-       bi->delalloc_bytes = 0;
-       bi->reserved_bytes = 0;
-       bi->disk_i_size = 0;
-       bi->flags = 0;
-       bi->index_cnt = (u64)-1;
-       bi->last_unlink_trans = 0;
-       bi->ordered_data_close = 0;
-       bi->force_compress = 0;
-       extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
-       extent_io_tree_init(&BTRFS_I(inode)->io_tree,
-                            inode->i_mapping, GFP_NOFS);
-       extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
-                            inode->i_mapping, GFP_NOFS);
-       INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
-       INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
-       RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
-       btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
-       mutex_init(&BTRFS_I(inode)->log_mutex);
-}
-
 static int btrfs_init_locked_inode(struct inode *inode, void *p)
 {
        struct btrfs_iget_args *args = p;
        inode->i_ino = args->ino;
-       init_btrfs_i(inode);
        BTRFS_I(inode)->root = args->root;
        btrfs_set_inode_space_info(args->root, inode);
        return 0;
@@ -3738,8 +4022,6 @@ static struct inode *new_simple_dir(struct super_block *s,
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
-       init_btrfs_i(inode);
-
        BTRFS_I(inode)->root = root;
        memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
        BTRFS_I(inode)->dummy_inode = 1;
@@ -3996,7 +4278,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        struct btrfs_trans_handle *trans;
        int ret = 0;
 
-       if (root->fs_info->btree_inode == inode)
+       if (BTRFS_I(inode)->dummy_inode)
                return 0;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -4017,10 +4299,27 @@ void btrfs_dirty_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
+       int ret;
+
+       if (BTRFS_I(inode)->dummy_inode)
+               return;
 
        trans = btrfs_join_transaction(root, 1);
        btrfs_set_trans_block_group(trans, inode);
-       btrfs_update_inode(trans, root, inode);
+
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret && ret == -ENOSPC) {
+               /* whoops, lets try again with the full transaction */
+               btrfs_end_transaction(trans, root);
+               trans = btrfs_start_transaction(root, 1);
+               btrfs_set_trans_block_group(trans, inode);
+
+               ret = btrfs_update_inode(trans, root, inode);
+               if (ret) {
+                       printk(KERN_ERR"btrfs: fail to dirty inode %lu error %d\n",
+                               inode->i_ino, ret);
+               }
+       }
        btrfs_end_transaction(trans, root);
 }
 
@@ -4138,7 +4437,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
         * btrfs_get_inode_index_count has an explanation for the magic
         * number
         */
-       init_btrfs_i(inode);
        BTRFS_I(inode)->index_cnt = 2;
        BTRFS_I(inode)->root = root;
        BTRFS_I(inode)->generation = trans->transid;
@@ -4302,26 +4600,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
+       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
+       if (err)
+               return err;
+
        /*
         * 2 for inode item and ref
         * 2 for dir items
         * 1 for xattr if selinux is on
         */
-       err = btrfs_reserve_metadata_space(root, 5);
-       if (err)
-               return err;
+       trans = btrfs_start_transaction(root, 5);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans)
-               goto fail;
        btrfs_set_trans_block_group(trans, dir);
 
-       err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
-       if (err) {
-               err = -ENOSPC;
-               goto out_unlock;
-       }
-
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len,
                                dentry->d_parent->d_inode->i_ino, objectid,
@@ -4350,13 +4643,11 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
-fail:
-       btrfs_unreserve_metadata_space(root, 5);
+       btrfs_btree_balance_dirty(root, nr);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
-       btrfs_btree_balance_dirty(root, nr);
        return err;
 }
 
@@ -4366,32 +4657,26 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
-       int err;
        int drop_inode = 0;
+       int err;
        unsigned long nr = 0;
        u64 objectid;
        u64 index = 0;
 
+       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
+       if (err)
+               return err;
        /*
         * 2 for inode item and ref
         * 2 for dir items
         * 1 for xattr if selinux is on
         */
-       err = btrfs_reserve_metadata_space(root, 5);
-       if (err)
-               return err;
+       trans = btrfs_start_transaction(root, 5);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans)
-               goto fail;
        btrfs_set_trans_block_group(trans, dir);
 
-       err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
-       if (err) {
-               err = -ENOSPC;
-               goto out_unlock;
-       }
-
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len,
                                dentry->d_parent->d_inode->i_ino,
@@ -4423,8 +4708,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
-fail:
-       btrfs_unreserve_metadata_space(root, 5);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4451,21 +4734,21 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        if (root->objectid != BTRFS_I(inode)->root->objectid)
                return -EPERM;
 
-       /*
-        * 1 item for inode ref
-        * 2 items for dir items
-        */
-       err = btrfs_reserve_metadata_space(root, 3);
-       if (err)
-               return err;
-
        btrfs_inc_nlink(inode);
 
        err = btrfs_set_inode_index(dir, &index);
        if (err)
                goto fail;
 
-       trans = btrfs_start_transaction(root, 1);
+       /*
+        * 1 item for inode ref
+        * 2 items for dir items
+        */
+       trans = btrfs_start_transaction(root, 3);
+       if (IS_ERR(trans)) {
+               err = PTR_ERR(trans);
+               goto fail;
+       }
 
        btrfs_set_trans_block_group(trans, dir);
        atomic_inc(&inode->i_count);
@@ -4484,7 +4767,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
 fail:
-       btrfs_unreserve_metadata_space(root, 3);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4504,28 +4786,20 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        u64 index = 0;
        unsigned long nr = 1;
 
+       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
+       if (err)
+               return err;
+
        /*
         * 2 items for inode and ref
         * 2 items for dir items
         * 1 for xattr if selinux is on
         */
-       err = btrfs_reserve_metadata_space(root, 5);
-       if (err)
-               return err;
-
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               err = -ENOMEM;
-               goto out_unlock;
-       }
+       trans = btrfs_start_transaction(root, 5);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
        btrfs_set_trans_block_group(trans, dir);
 
-       err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
-       if (err) {
-               err = -ENOSPC;
-               goto out_fail;
-       }
-
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len,
                                dentry->d_parent->d_inode->i_ino, objectid,
@@ -4565,9 +4839,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 out_fail:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
-
-out_unlock:
-       btrfs_unreserve_metadata_space(root, 5);
        if (drop_on_err)
                iput(inode);
        btrfs_btree_balance_dirty(root, nr);
@@ -4825,6 +5096,7 @@ again:
                        }
                        flush_dcache_page(page);
                } else if (create && PageUptodate(page)) {
+                       WARN_ON(1);
                        if (!trans) {
                                kunmap(page);
                                free_extent_map(em);
@@ -4921,15 +5193,655 @@ out:
        return em;
 }
 
-static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+                                                 u64 start, u64 len)
 {
-       return -EINVAL;
-}
-
-static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-               __u64 start, __u64 len)
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
+       struct extent_map *em;
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct btrfs_key ins;
+       u64 alloc_hint;
+       int ret;
+
+       btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+
+       trans = btrfs_join_transaction(root, 0);
+       if (!trans)
+               return ERR_PTR(-ENOMEM);
+
+       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+
+       alloc_hint = get_extent_allocation_hint(inode, start, len);
+       ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
+                                  alloc_hint, (u64)-1, &ins, 1);
+       if (ret) {
+               em = ERR_PTR(ret);
+               goto out;
+       }
+
+       em = alloc_extent_map(GFP_NOFS);
+       if (!em) {
+               em = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       em->start = start;
+       em->orig_start = em->start;
+       em->len = ins.offset;
+
+       em->block_start = ins.objectid;
+       em->block_len = ins.offset;
+       em->bdev = root->fs_info->fs_devices->latest_bdev;
+       set_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+       while (1) {
+               write_lock(&em_tree->lock);
+               ret = add_extent_mapping(em_tree, em);
+               write_unlock(&em_tree->lock);
+               if (ret != -EEXIST)
+                       break;
+               btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
+       }
+
+       ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
+                                          ins.offset, ins.offset, 0);
+       if (ret) {
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+               em = ERR_PTR(ret);
+       }
+out:
+       btrfs_end_transaction(trans, root);
+       return em;
+}
+
+/*
+ * returns 1 when the nocow is safe, < 0 on error, 0 if the
+ * block must be cow'd
+ */
+static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
+                                     struct inode *inode, u64 offset, u64 len)
+{
+       struct btrfs_path *path;
+       int ret;
+       struct extent_buffer *leaf;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_key key;
+       u64 disk_bytenr;
+       u64 backref_offset;
+       u64 extent_end;
+       u64 num_bytes;
+       int slot;
+       int found_type;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+                                      offset, 0);
+       if (ret < 0)
+               goto out;
+
+       slot = path->slots[0];
+       if (ret == 1) {
+               if (slot == 0) {
+                       /* can't find the item, must cow */
+                       ret = 0;
+                       goto out;
+               }
+               slot--;
+       }
+       ret = 0;
+       leaf = path->nodes[0];
+       btrfs_item_key_to_cpu(leaf, &key, slot);
+       if (key.objectid != inode->i_ino ||
+           key.type != BTRFS_EXTENT_DATA_KEY) {
+               /* not our file or wrong item type, must cow */
+               goto out;
+       }
+
+       if (key.offset > offset) {
+               /* Wrong offset, must cow */
+               goto out;
+       }
+
+       fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+       found_type = btrfs_file_extent_type(leaf, fi);
+       if (found_type != BTRFS_FILE_EXTENT_REG &&
+           found_type != BTRFS_FILE_EXTENT_PREALLOC) {
+               /* not a regular extent, must cow */
+               goto out;
+       }
+       disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+       backref_offset = btrfs_file_extent_offset(leaf, fi);
+
+       extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+       if (extent_end < offset + len) {
+               /* extent doesn't include our full range, must cow */
+               goto out;
+       }
+
+       if (btrfs_extent_readonly(root, disk_bytenr))
+               goto out;
+
+       /*
+        * look for other files referencing this extent; if we
+        * find any, we must cow
+        */
+       if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+                                 key.offset - backref_offset, disk_bytenr))
+               goto out;
+
+       /*
+        * adjust disk_bytenr and num_bytes to cover just the bytes
+        * in this extent we are about to write.  If there
+        * are any csums in that range we have to cow in order
+        * to keep the csums correct
+        */
+       disk_bytenr += backref_offset;
+       disk_bytenr += offset - key.offset;
+       num_bytes = min(offset + len, extent_end) - offset;
+       if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+               goto out;
+       /*
+        * all of the above have passed, it is safe to overwrite this extent
+        * without cow
+        */
+       ret = 1;
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
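
The byte arithmetic above is easier to check with concrete numbers. Below is a small user-space sketch (plain C, invented values, not kernel code) that replays the same adjustment: the on-disk start is advanced by the backref offset plus how far into the file extent item the write begins, and the length is clipped to the end of the extent.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical extent item: file range [0, 64K) backed by a
             * disk extent starting at 1M, pointing 4K into that extent */
            uint64_t key_offset = 0;
            uint64_t extent_end = 65536;
            uint64_t disk_bytenr = 1048576;
            uint64_t backref_offset = 4096;

            /* the DIO write we would like to nocow: 16K at file offset 8K */
            uint64_t offset = 8192, len = 16384;

            /* same adjustment as can_nocow_odirect() */
            disk_bytenr += backref_offset;
            disk_bytenr += offset - key_offset;
            uint64_t num_bytes =
                    (offset + len < extent_end ? offset + len : extent_end) - offset;

            assert(disk_bytenr == 1048576 + 4096 + 8192);
            assert(num_bytes == 16384);
            return 0;
    }
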
+static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
+                                  struct buffer_head *bh_result, int create)
+{
+       struct extent_map *em;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 start = iblock << inode->i_blkbits;
+       u64 len = bh_result->b_size;
+       struct btrfs_trans_handle *trans;
+
+       em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+       if (IS_ERR(em))
+               return PTR_ERR(em);
+
+       /*
+        * Ok, for INLINE and COMPRESSED extents we need to fall back to
+        * buffered IO.  INLINE is special, and we could probably kludge it in
+        * here, but it's still buffered so for safety let's just fall back to
+        * the generic buffered path.
+        *
+        * For COMPRESSED we _have_ to read the entire extent in so we can
+        * decompress it, so there will be buffering required no matter what we
+        * do, so go ahead and fallback to buffered.
+        *
+        * We return -ENOTBLK because that's what tells DIO to fall back to
+        * buffered IO.  Don't blame me, this is the price we pay for using
+        * the generic code.
+        */
+       if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
+           em->block_start == EXTENT_MAP_INLINE) {
+               free_extent_map(em);
+               return -ENOTBLK;
+       }
+
+       /* Just a good old fashioned hole, return */
+       if (!create && (em->block_start == EXTENT_MAP_HOLE ||
+                       test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+               free_extent_map(em);
+               /* DIO will do one hole at a time, so just unlock a sector */
+               unlock_extent(&BTRFS_I(inode)->io_tree, start,
+                             start + root->sectorsize - 1, GFP_NOFS);
+               return 0;
+       }
+
+       /*
+        * We don't allocate a new extent in the following cases
+        *
+        * 1) The inode is marked as NODATACOW.  In this case we'll just use the
+        * existing extent.
+        * 2) The extent is marked as PREALLOC.  We're good to go here and can
+        * just use the extent.
+        *
+        */
+       if (!create) {
+               len = em->len - (start - em->start);
+               goto map;
+       }
+
+       if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
+           ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+            em->block_start != EXTENT_MAP_HOLE)) {
+               int type;
+               int ret;
+               u64 block_start;
+
+               if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       type = BTRFS_ORDERED_PREALLOC;
+               else
+                       type = BTRFS_ORDERED_NOCOW;
+               len = min(len, em->len - (start - em->start));
+               block_start = em->block_start + (start - em->start);
+
+               /*
+                * we're not going to log anything, but we do need
+                * to make sure the current transaction stays open
+                * while we look for nocow cross refs
+                */
+               trans = btrfs_join_transaction(root, 0);
+               if (!trans)
+                       goto must_cow;
+
+               if (can_nocow_odirect(trans, inode, start, len) == 1) {
+                       ret = btrfs_add_ordered_extent_dio(inode, start,
+                                          block_start, len, len, type);
+                       btrfs_end_transaction(trans, root);
+                       if (ret) {
+                               free_extent_map(em);
+                               return ret;
+                       }
+                       goto unlock;
+               }
+               btrfs_end_transaction(trans, root);
+       }
+must_cow:
+       /*
+        * this will cow the extent, reset the len in case we changed
+        * it above
+        */
+       len = bh_result->b_size;
+       free_extent_map(em);
+       em = btrfs_new_extent_direct(inode, start, len);
+       if (IS_ERR(em))
+               return PTR_ERR(em);
+       len = min(len, em->len - (start - em->start));
+unlock:
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
+                         EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
+                         0, NULL, GFP_NOFS);
+map:
+       bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
+               inode->i_blkbits;
+       bh_result->b_size = len;
+       bh_result->b_bdev = em->bdev;
+       set_buffer_mapped(bh_result);
+       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+               set_buffer_new(bh_result);
+
+       free_extent_map(em);
+
+       return 0;
+}
+
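
A note on units, since btrfs_get_blocks_direct() mixes them: the generic DIO code passes iblock in filesystem-block units (1 << inode->i_blkbits) and expects b_blocknr back in the same units, while b_size and the extent map work in bytes. A tiny sketch of the two conversions, assuming 4K blocks; the numbers are made up.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const unsigned int blkbits = 12;    /* 4K filesystem blocks assumed */
            uint64_t iblock = 3;                /* block index from the DIO code */

            uint64_t start = iblock << blkbits; /* byte offset, as computed above */
            uint64_t block_start = 1048576;     /* em->block_start + (start - em->start) */

            assert(start == 12288);
            assert((block_start >> blkbits) == 256); /* value stored in b_blocknr */
            return 0;
    }
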
+struct btrfs_dio_private {
+       struct inode *inode;
+       u64 logical_offset;
+       u64 disk_bytenr;
+       u64 bytes;
+       u32 *csums;
+       void *private;
+};
+
+static void btrfs_endio_direct_read(struct bio *bio, int err)
+{
+       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec = bio->bi_io_vec;
+       struct btrfs_dio_private *dip = bio->bi_private;
+       struct inode *inode = dip->inode;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 start;
+       u32 *private = dip->csums;
+
+       start = dip->logical_offset;
+       do {
+               if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+                       struct page *page = bvec->bv_page;
+                       char *kaddr;
+                       u32 csum = ~(u32)0;
+                       unsigned long flags;
+
+                       local_irq_save(flags);
+                       kaddr = kmap_atomic(page, KM_IRQ0);
+                       csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
+                                              csum, bvec->bv_len);
+                       btrfs_csum_final(csum, (char *)&csum);
+                       kunmap_atomic(kaddr, KM_IRQ0);
+                       local_irq_restore(flags);
+
+                       flush_dcache_page(bvec->bv_page);
+                       if (csum != *private) {
+                               printk(KERN_ERR "btrfs csum failed ino %lu off"
+                                     " %llu csum %u private %u\n",
+                                     inode->i_ino, (unsigned long long)start,
+                                     csum, *private);
+                               err = -EIO;
+                       }
+               }
+
+               start += bvec->bv_len;
+               private++;
+               bvec++;
+       } while (bvec <= bvec_end);
+
+       unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
+                     dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+       bio->bi_private = dip->private;
+
+       kfree(dip->csums);
+       kfree(dip);
+       dio_end_io(bio, err);
+}
+
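
The read-completion path above recomputes a checksum for every bio_vec and compares it against the value fetched at submit time. btrfs data checksums are CRC-32C; the user-space sketch below shows the bare algorithm (bitwise and slow, for illustration only), not the kernel's btrfs_csum_data()/btrfs_csum_final() helpers, which also handle the seed and the final inversion.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
            const uint8_t *p = buf;

            crc = ~crc;
            while (len--) {
                    crc ^= *p++;
                    for (int k = 0; k < 8; k++)
                            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
            }
            return ~crc;
    }

    int main(void)
    {
            const char *msg = "123456789";

            /* the standard CRC-32C check value for this input is e3069283 */
            printf("%08x\n", crc32c(0, msg, strlen(msg)));
            return 0;
    }
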
+static void btrfs_endio_direct_write(struct bio *bio, int err)
+{
+       struct btrfs_dio_private *dip = bio->bi_private;
+       struct inode *inode = dip->inode;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_ordered_extent *ordered = NULL;
+       struct extent_state *cached_state = NULL;
+       int ret;
+
+       if (err)
+               goto out_done;
+
+       ret = btrfs_dec_test_ordered_pending(inode, &ordered,
+                                            dip->logical_offset, dip->bytes);
+       if (!ret)
+               goto out_done;
+
+       BUG_ON(!ordered);
+
+       trans = btrfs_join_transaction(root, 1);
+       if (!trans) {
+               err = -ENOMEM;
+               goto out;
+       }
+       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+
+       if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
+               ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+               if (!ret)
+                       ret = btrfs_update_inode(trans, root, inode);
+               err = ret;
+               goto out;
+       }
+
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
+                        ordered->file_offset + ordered->len - 1, 0,
+                        &cached_state, GFP_NOFS);
+
+       if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
+               ret = btrfs_mark_extent_written(trans, inode,
+                                               ordered->file_offset,
+                                               ordered->file_offset +
+                                               ordered->len);
+               if (ret) {
+                       err = ret;
+                       goto out_unlock;
+               }
+       } else {
+               ret = insert_reserved_file_extent(trans, inode,
+                                                 ordered->file_offset,
+                                                 ordered->start,
+                                                 ordered->disk_len,
+                                                 ordered->len,
+                                                 ordered->len,
+                                                 0, 0, 0,
+                                                 BTRFS_FILE_EXTENT_REG);
+               unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+                                  ordered->file_offset, ordered->len);
+               if (ret) {
+                       err = ret;
+                       WARN_ON(1);
+                       goto out_unlock;
+               }
+       }
+
+       add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
+       btrfs_ordered_update_i_size(inode, 0, ordered);
+       btrfs_update_inode(trans, root, inode);
+out_unlock:
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
+                            ordered->file_offset + ordered->len - 1,
+                            &cached_state, GFP_NOFS);
+out:
+       btrfs_delalloc_release_metadata(inode, ordered->len);
+       btrfs_end_transaction(trans, root);
+       btrfs_put_ordered_extent(ordered);
+       btrfs_put_ordered_extent(ordered);
+out_done:
+       bio->bi_private = dip->private;
+
+       kfree(dip->csums);
+       kfree(dip);
+       dio_end_io(bio, err);
+}
+
+static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
+                                   struct bio *bio, int mirror_num,
+                                   unsigned long bio_flags, u64 offset)
+{
+       int ret;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
+       BUG_ON(ret);
+       return 0;
+}
+
+static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
+                               loff_t file_offset)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_dio_private *dip;
+       struct bio_vec *bvec = bio->bi_io_vec;
+       u64 start;
+       int skip_sum;
+       int write = rw & (1 << BIO_RW);
+       int ret = 0;
+
+       skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+
+       dip = kmalloc(sizeof(*dip), GFP_NOFS);
+       if (!dip) {
+               ret = -ENOMEM;
+               goto free_ordered;
+       }
+       dip->csums = NULL;
+
+       if (!skip_sum) {
+               dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
+               if (!dip->csums) {
+                       ret = -ENOMEM;
+                       goto free_ordered;
+               }
+       }
+
+       dip->private = bio->bi_private;
+       dip->inode = inode;
+       dip->logical_offset = file_offset;
+
+       start = dip->logical_offset;
+       dip->bytes = 0;
+       do {
+               dip->bytes += bvec->bv_len;
+               bvec++;
+       } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
+
+       dip->disk_bytenr = (u64)bio->bi_sector << 9;
+       bio->bi_private = dip;
+
+       if (write)
+               bio->bi_end_io = btrfs_endio_direct_write;
+       else
+               bio->bi_end_io = btrfs_endio_direct_read;
+
+       ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+       if (ret)
+               goto out_err;
+
+       if (write && !skip_sum) {
+               ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+                                  inode, rw, bio, 0, 0,
+                                  dip->logical_offset,
+                                  __btrfs_submit_bio_start_direct_io,
+                                  __btrfs_submit_bio_done);
+               if (ret)
+                       goto out_err;
+               return;
+       } else if (!skip_sum)
+               btrfs_lookup_bio_sums_dio(root, inode, bio,
+                                         dip->logical_offset, dip->csums);
+
+       ret = btrfs_map_bio(root, rw, bio, 0, 1);
+       if (ret)
+               goto out_err;
+       return;
+out_err:
+       kfree(dip->csums);
+       kfree(dip);
+free_ordered:
+       /*
+        * If this is a write, we need to clean up the reserved space and kill
+        * the ordered extent.
+        */
+       if (write) {
+               struct btrfs_ordered_extent *ordered;
+               ordered = btrfs_lookup_ordered_extent(inode,
+                                                     dip->logical_offset);
+               if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
+                   !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
+                       btrfs_free_reserved_extent(root, ordered->start,
+                                                  ordered->disk_len);
+               btrfs_put_ordered_extent(ordered);
+               btrfs_put_ordered_extent(ordered);
+       }
+       bio_endio(bio, ret);
+}
+
+static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
+                       const struct iovec *iov, loff_t offset,
+                       unsigned long nr_segs)
+{
+       int seg;
+       size_t size;
+       unsigned long addr;
+       unsigned blocksize_mask = root->sectorsize - 1;
+       ssize_t retval = -EINVAL;
+       loff_t end = offset;
+
+       if (offset & blocksize_mask)
+               goto out;
+
+       /* Check the memory alignment.  Blocks cannot straddle pages */
+       for (seg = 0; seg < nr_segs; seg++) {
+               addr = (unsigned long)iov[seg].iov_base;
+               size = iov[seg].iov_len;
+               end += size;
+               if ((addr & blocksize_mask) || (size & blocksize_mask))
+                       goto out;
+       }
+       retval = 0;
+out:
+       return retval;
+}
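
check_direct_IO() only requires the file offset and every iovec (both the user address and the length) to be multiples of the sector size; anything else makes btrfs_direct_IO() below return 0, leaving the whole request to the buffered path. A user-space sketch of an IO that passes the check, assuming a 4K sectorsize; the file name and sizes are arbitrary.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const size_t align = 4096;      /* assumed sectorsize */
            void *buf;
            int fd;

            if (posix_memalign(&buf, align, align))
                    return 1;
            memset(buf, 'x', align);

            fd = open("testfile", O_CREAT | O_WRONLY | O_DIRECT, 0644);
            if (fd < 0)
                    return 1;

            /* aligned offset, aligned address, aligned length */
            if (pwrite(fd, buf, align, 0) != (ssize_t)align)
                    return 1;

            close(fd);
            free(buf);
            return 0;
    }
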
+static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
+                       const struct iovec *iov, loff_t offset,
+                       unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       struct btrfs_ordered_extent *ordered;
+       struct extent_state *cached_state = NULL;
+       u64 lockstart, lockend;
+       ssize_t ret;
+       int writing = rw & WRITE;
+       int write_bits = 0;
+       size_t count = iov_length(iov, nr_segs);
+
+       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
+                           offset, nr_segs)) {
+               return 0;
+       }
+
+       lockstart = offset;
+       lockend = offset + count - 1;
+
+       if (writing) {
+               ret = btrfs_delalloc_reserve_space(inode, count);
+               if (ret)
+                       goto out;
+       }
+
+       while (1) {
+               lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                0, &cached_state, GFP_NOFS);
+               /*
+                * We're concerned with the entire range that we're going to be
+                * doing DIO to, so we need to make sure there are no ordered
+                * extents in this range.
+                */
+               ordered = btrfs_lookup_ordered_range(inode, lockstart,
+                                                    lockend - lockstart + 1);
+               if (!ordered)
+                       break;
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                    &cached_state, GFP_NOFS);
+               btrfs_start_ordered_extent(inode, ordered, 1);
+               btrfs_put_ordered_extent(ordered);
+               cond_resched();
+       }
+
+       /*
+        * we don't use btrfs_set_extent_delalloc because we don't want
+        * the dirty or uptodate bits
+        */
+       if (writing) {
+               write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
+               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                    EXTENT_DELALLOC, 0, NULL, &cached_state,
+                                    GFP_NOFS);
+               if (ret) {
+                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                        lockend, EXTENT_LOCKED | write_bits,
+                                        1, 0, &cached_state, GFP_NOFS);
+                       goto out;
+               }
+       }
+
+       free_extent_state(cached_state);
+       cached_state = NULL;
+
+       ret = __blockdev_direct_IO(rw, iocb, inode,
+                  BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+                  iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+                  btrfs_submit_direct, 0);
+
+       if (ret < 0 && ret != -EIOCBQUEUED) {
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
+                             offset + iov_length(iov, nr_segs) - 1,
+                             EXTENT_LOCKED | write_bits, 1, 0,
+                             &cached_state, GFP_NOFS);
+       } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
+               /*
+                * We're falling back to buffered, unlock the section we didn't
+                * do IO on.
+                */
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
+                             offset + iov_length(iov, nr_segs) - 1,
+                             EXTENT_LOCKED | write_bits, 1, 0,
+                             &cached_state, GFP_NOFS);
+       }
+out:
+       free_extent_state(cached_state);
+       return ret;
+}
+
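
btrfs_direct_IO() has to release its extent locks and delalloc bits for whatever part of the range the DIO engine did not consume: the whole range on error, only the tail after a short result (which the caller then finishes through the page cache). A sketch of that range split with concrete numbers, illustrative only.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t offset = 0, count = 1048576;   /* a 1M DIO request */
            int64_t ret = 655360;                   /* the DIO engine handled 640K */

            /* the range cleared by the "ret >= 0 && ret < count" branch above */
            uint64_t clear_start = offset + (uint64_t)ret;
            uint64_t clear_end = offset + count - 1;

            assert(clear_start == 655360);
            assert(clear_end == 1048575);
            return 0;
    }
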
+static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+               __u64 start, __u64 len)
 {
        return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
 }
@@ -5089,7 +6001,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
-       ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
+       ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
@@ -5098,13 +6010,6 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto out;
        }
 
-       ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
-       if (ret) {
-               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
-               ret = VM_FAULT_SIGBUS;
-               goto out;
-       }
-
        ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 again:
        lock_page(page);
@@ -5114,7 +6019,6 @@ again:
 
        if ((page->mapping != inode->i_mapping) ||
            (page_start >= size)) {
-               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
                /* page got truncated out from underneath us */
                goto out_unlock;
        }
@@ -5155,7 +6059,6 @@ again:
                unlock_extent_cached(io_tree, page_start, page_end,
                                     &cached_state, GFP_NOFS);
                ret = VM_FAULT_SIGBUS;
-               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
                goto out_unlock;
        }
        ret = 0;
@@ -5182,10 +6085,10 @@ again:
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
 
 out_unlock:
-       btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
        if (!ret)
                return VM_FAULT_LOCKED;
        unlock_page(page);
+       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
 out:
        return ret;
 }
@@ -5210,8 +6113,10 @@ static void btrfs_truncate(struct inode *inode)
        btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
        btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
 
-       trans = btrfs_start_transaction(root, 1);
+       trans = btrfs_start_transaction(root, 0);
+       BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
+       trans->block_rsv = root->orphan_block_rsv;
 
        /*
         * setattr is responsible for setting the ordered_data_close flag,
@@ -5234,6 +6139,23 @@ static void btrfs_truncate(struct inode *inode)
                btrfs_add_ordered_operation(trans, root, inode);
 
        while (1) {
+               if (!trans) {
+                       trans = btrfs_start_transaction(root, 0);
+                       BUG_ON(IS_ERR(trans));
+                       btrfs_set_trans_block_group(trans, inode);
+                       trans->block_rsv = root->orphan_block_rsv;
+               }
+
+               ret = btrfs_block_rsv_check(trans, root,
+                                           root->orphan_block_rsv, 0, 5);
+               if (ret) {
+                       BUG_ON(ret != -EAGAIN);
+                       ret = btrfs_commit_transaction(trans, root);
+                       BUG_ON(ret);
+                       trans = NULL;
+                       continue;
+               }
+
                ret = btrfs_truncate_inode_items(trans, root, inode,
                                                 inode->i_size,
                                                 BTRFS_EXTENT_DATA_KEY);
@@ -5245,10 +6167,8 @@ static void btrfs_truncate(struct inode *inode)
 
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
+               trans = NULL;
                btrfs_btree_balance_dirty(root, nr);
-
-               trans = btrfs_start_transaction(root, 1);
-               btrfs_set_trans_block_group(trans, inode);
        }
 
        if (ret == 0 && inode->i_nlink > 0) {
@@ -5309,21 +6229,47 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 struct inode *btrfs_alloc_inode(struct super_block *sb)
 {
        struct btrfs_inode *ei;
+       struct inode *inode;
 
        ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
+
+       ei->root = NULL;
+       ei->space_info = NULL;
+       ei->generation = 0;
+       ei->sequence = 0;
        ei->last_trans = 0;
        ei->last_sub_trans = 0;
        ei->logged_trans = 0;
-       ei->outstanding_extents = 0;
-       ei->reserved_extents = 0;
-       ei->root = NULL;
+       ei->delalloc_bytes = 0;
+       ei->reserved_bytes = 0;
+       ei->disk_i_size = 0;
+       ei->flags = 0;
+       ei->index_cnt = (u64)-1;
+       ei->last_unlink_trans = 0;
+
        spin_lock_init(&ei->accounting_lock);
+       atomic_set(&ei->outstanding_extents, 0);
+       ei->reserved_extents = 0;
+
+       ei->ordered_data_close = 0;
+       ei->orphan_meta_reserved = 0;
+       ei->dummy_inode = 0;
+       ei->force_compress = 0;
+
+       inode = &ei->vfs_inode;
+       extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
+       extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
+       extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
+       mutex_init(&ei->log_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
+       INIT_LIST_HEAD(&ei->delalloc_inodes);
        INIT_LIST_HEAD(&ei->ordered_operations);
-       return &ei->vfs_inode;
+       RB_CLEAR_NODE(&ei->rb_node);
+
+       return inode;
 }
 
 void btrfs_destroy_inode(struct inode *inode)
@@ -5333,6 +6279,8 @@ void btrfs_destroy_inode(struct inode *inode)
 
        WARN_ON(!list_empty(&inode->i_dentry));
        WARN_ON(inode->i_data.nrpages);
+       WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
+       WARN_ON(BTRFS_I(inode)->reserved_extents);
 
        /*
         * This can happen where we create an inode, but somebody else also
@@ -5353,13 +6301,13 @@ void btrfs_destroy_inode(struct inode *inode)
                spin_unlock(&root->fs_info->ordered_extent_lock);
        }
 
-       spin_lock(&root->list_lock);
+       spin_lock(&root->orphan_lock);
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
                printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
                       inode->i_ino);
                list_del_init(&BTRFS_I(inode)->i_orphan);
        }
-       spin_unlock(&root->list_lock);
+       spin_unlock(&root->orphan_lock);
 
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -5384,7 +6332,6 @@ free:
 void btrfs_drop_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-
        if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
                generic_delete_inode(inode);
        else
@@ -5481,19 +6428,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (S_ISDIR(old_inode->i_mode) && new_inode &&
            new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
-
-       /*
-        * We want to reserve the absolute worst case amount of items.  So if
-        * both inodes are subvols and we need to unlink them then that would
-        * require 4 item modifications, but if they are both normal inodes it
-        * would require 5 item modifications, so we'll assume their normal
-        * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
-        * should cover the worst case number of items we'll modify.
-        */
-       ret = btrfs_reserve_metadata_space(root, 11);
-       if (ret)
-               return ret;
-
        /*
         * we're using rename to replace one file with another.
         * and the replacement file is large.  Start IO on it now so
@@ -5506,8 +6440,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        /* close the racy window with snapshot create/destroy ioctl */
        if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&root->fs_info->subvol_sem);
+       /*
+        * We want to reserve the absolute worst case amount of items.  So if
+        * both inodes are subvols and we need to unlink them then that would
+        * require 4 item modifications, but if they are both normal inodes it
+        * would require 5 item modifications, so we'll assume they're normal
+        * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
+        * should cover the worst case number of items we'll modify.
+        */
+       trans = btrfs_start_transaction(root, 20);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
-       trans = btrfs_start_transaction(root, 1);
        btrfs_set_trans_block_group(trans, new_dir);
 
        if (dest != root)
@@ -5606,7 +6550,6 @@ out_fail:
        if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
 
-       btrfs_unreserve_metadata_space(root, 11);
        return ret;
 }
 
@@ -5658,6 +6601,38 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
        return 0;
 }
 
+int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput)
+{
+       struct btrfs_inode *binode;
+       struct inode *inode = NULL;
+
+       spin_lock(&root->fs_info->delalloc_lock);
+       while (!list_empty(&root->fs_info->delalloc_inodes)) {
+               binode = list_entry(root->fs_info->delalloc_inodes.next,
+                                   struct btrfs_inode, delalloc_inodes);
+               inode = igrab(&binode->vfs_inode);
+               if (inode) {
+                       list_move_tail(&binode->delalloc_inodes,
+                                      &root->fs_info->delalloc_inodes);
+                       break;
+               }
+
+               list_del_init(&binode->delalloc_inodes);
+               cond_resched_lock(&root->fs_info->delalloc_lock);
+       }
+       spin_unlock(&root->fs_info->delalloc_lock);
+
+       if (inode) {
+               write_inode_now(inode, 0);
+               if (delay_iput)
+                       btrfs_add_delayed_iput(inode);
+               else
+                       iput(inode);
+               return 1;
+       }
+       return 0;
+}
+
 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
 {
@@ -5681,26 +6656,20 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;
 
+       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
+       if (err)
+               return err;
        /*
         * 2 items for inode item and ref
         * 2 items for dir items
         * 1 item for xattr if selinux is on
         */
-       err = btrfs_reserve_metadata_space(root, 5);
-       if (err)
-               return err;
+       trans = btrfs_start_transaction(root, 5);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans)
-               goto out_fail;
        btrfs_set_trans_block_group(trans, dir);
 
-       err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
-       if (err) {
-               err = -ENOSPC;
-               goto out_unlock;
-       }
-
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len,
                                dentry->d_parent->d_inode->i_ino, objectid,
@@ -5772,8 +6741,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
-out_fail:
-       btrfs_unreserve_metadata_space(root, 5);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -5782,36 +6749,28 @@ out_fail:
        return err;
 }
 
-static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
-                       u64 alloc_hint, int mode, loff_t actual_len)
+int btrfs_prealloc_file_range(struct inode *inode, int mode,
+                             u64 start, u64 num_bytes, u64 min_size,
+                             loff_t actual_len, u64 *alloc_hint)
 {
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key ins;
-       u64 alloc_size;
        u64 cur_offset = start;
-       u64 num_bytes = end - start;
        int ret = 0;
-       u64 i_size;
 
        while (num_bytes > 0) {
-               alloc_size = min(num_bytes, root->fs_info->max_extent);
-
-               trans = btrfs_start_transaction(root, 1);
-
-               ret = btrfs_reserve_extent(trans, root, alloc_size,
-                                          root->sectorsize, 0, alloc_hint,
-                                          (u64)-1, &ins, 1);
-               if (ret) {
-                       WARN_ON(1);
-                       goto stop_trans;
+               trans = btrfs_start_transaction(root, 3);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       break;
                }
 
-               ret = btrfs_reserve_metadata_space(root, 3);
+               ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
+                                          0, *alloc_hint, (u64)-1, &ins, 1);
                if (ret) {
-                       btrfs_free_reserved_extent(root, ins.objectid,
-                                                  ins.offset);
-                       goto stop_trans;
+                       btrfs_end_transaction(trans, root);
+                       break;
                }
 
                ret = insert_reserved_file_extent(trans, inode,
@@ -5825,34 +6784,27 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
 
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
-               alloc_hint = ins.objectid + ins.offset;
+               *alloc_hint = ins.objectid + ins.offset;
 
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-                       (actual_len > inode->i_size) &&
-                       (cur_offset > inode->i_size)) {
-
+                   (actual_len > inode->i_size) &&
+                   (cur_offset > inode->i_size)) {
                        if (cur_offset > actual_len)
-                               i_size  = actual_len;
+                               i_size_write(inode, actual_len);
                        else
-                               i_size = cur_offset;
-                       i_size_write(inode, i_size);
-                       btrfs_ordered_update_i_size(inode, i_size, NULL);
+                               i_size_write(inode, cur_offset);
+                       btrfs_ordered_update_i_size(inode, cur_offset, NULL);
                }
 
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
 
                btrfs_end_transaction(trans, root);
-               btrfs_unreserve_metadata_space(root, 3);
        }
        return ret;
-
-stop_trans:
-       btrfs_end_transaction(trans, root);
-       return ret;
-
 }
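
The loop in btrfs_prealloc_file_range() trims num_bytes by whatever the allocator actually returned (ins.offset), so a single call may take several passes when free space is fragmented. A small numeric sketch of two passes, with invented allocator results.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t cur_offset = 0, num_bytes = 262144, alloc_hint = 0;

            /* pass 1: the allocator hands back 192K at disk byte 1M */
            uint64_t ins_objectid = 1048576, ins_offset = 196608;
            num_bytes -= ins_offset;
            cur_offset += ins_offset;
            alloc_hint = ins_objectid + ins_offset;

            /* pass 2: the remaining 64K is placed at the hinted location */
            ins_objectid = alloc_hint;
            ins_offset = num_bytes;
            num_bytes -= ins_offset;
            cur_offset += ins_offset;
            alloc_hint = ins_objectid + ins_offset;

            assert(num_bytes == 0);
            assert(cur_offset == 262144);
            assert(alloc_hint == 1048576 + 196608 + 65536);
            return 0;
    }
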
 
 static long btrfs_fallocate(struct inode *inode, int mode,
@@ -5885,8 +6837,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
                        goto out;
        }
 
-       ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
-                                         alloc_end - alloc_start);
+       ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
        if (ret)
                goto out;
 
@@ -5931,16 +6882,16 @@ static long btrfs_fallocate(struct inode *inode, int mode,
                if (em->block_start == EXTENT_MAP_HOLE ||
                    (cur_offset >= inode->i_size &&
                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-                       ret = prealloc_file_range(inode,
-                                                 cur_offset, last_byte,
-                                               alloc_hint, mode, offset+len);
+                       ret = btrfs_prealloc_file_range(inode, 0, cur_offset,
+                                                       last_byte - cur_offset,
+                                                       1 << inode->i_blkbits,
+                                                       offset + len,
+                                                       &alloc_hint);
                        if (ret < 0) {
                                free_extent_map(em);
                                break;
                        }
                }
-               if (em->block_start <= EXTENT_MAP_LAST_BYTE)
-                       alloc_hint = em->block_start;
                free_extent_map(em);
 
                cur_offset = last_byte;
@@ -5952,8 +6903,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                             &cached_state, GFP_NOFS);
 
-       btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
-                                      alloc_end - alloc_start);
+       btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 out:
        mutex_unlock(&inode->i_mutex);
        return ret;