/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "tree-log.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

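/*
 * Illustrative note (an assumption drawn from the logging callers, not a
 * statement from the original comment): when an fsync logs a new name,
 * parent directories generally only need LOG_INODE_EXISTS, while the
 * inode being fsync'd itself is logged with LOG_INODE_ALL.
 */
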
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2

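/*
 * Hedged overview of how these stages are consumed (a sketch, not a quote
 * of the recovery code): the log tree is walked once per stage, matching
 * the "read three times" description below:
 *
 *	wc.stage = LOG_WALK_PIN_ONLY;	   pin down every extent in use
 *	wc.stage = LOG_WALK_REPLAY_INODES; create all the logged inodes
 *	wc.stage = LOG_WALK_REPLAY_ALL;	   copy everything else
 */
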
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	int ret;
	int err = 0;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			root->log_multiple_pids = false;
		} else if (root->log_start_pid != current->pid) {
			root->log_multiple_pids = true;
		}

		root->log_batch++;
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	root->log_multiple_pids = false;
	root->log_start_pid = current->pid;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree) {
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
		if (ret)
			err = ret;
	}
	if (err == 0 && !root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			err = ret;
	}
	mutex_unlock(&root->fs_info->tree_log_mutex);
	root->log_batch++;
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return err;
}

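/*
 * Hedged caller sketch (simplified; the exact fsync path lives outside
 * this file): every successful start_log_trans() must be balanced by
 * btrfs_end_log_trans() so log_writers can drop back to zero and a
 * pending log commit can make progress:
 *
 *	ret = start_log_trans(trans, root);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *	btrfs_end_log_trans(root);
 */
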
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
int btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
	return 0;
}

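/*
 * Usage note (illustrative, simplified): btrfs_pin_log_trans() pairs with
 * btrfs_end_log_trans() to hold the log commit out while an operation
 * such as rename is in flux:
 *
 *	btrfs_pin_log_trans(root);
 *	... update the subvolume tree ...
 *	btrfs_end_log_trans(root);
 */
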
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

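/*
 * For reference, a minimal initialization of this struct, mirroring
 * free_log_tree() below:
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *	ret = walk_log_tree(trans, log, &wc);
 */
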
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	if (wc->pin)
		btrfs_pin_extent_for_log_replay(wc->trans,
						log->fs_info->extent_root,
						eb->start, eb->len);

	if (btrfs_buffer_uptodate(eb, gen)) {
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return 0;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size) {
			btrfs_truncate_item(trans, root, path, item_size, 1);
		} else if (found_size < item_size) {
			ret = btrfs_extend_item(trans, root, path,
						item_size - found_size);
		}
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}

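/*
 * Typical replay-time use (as in the callers below), where a NULL return
 * is treated as an IO error:
 *
 *	inode = read_one_inode(root, key->objectid);
 *	if (!inode)
 *		return -EIO;
 */
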
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 mask = root->sectorsize - 1;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = (start + size + mask) & ~mask;
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, inode, start, extent_end, 1);
	BUG_ON(ret);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		BUG_ON(ret);
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				BUG_ON(ret);
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				BUG_ON(ret);
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			BUG_ON(ret);
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				BUG_ON(ret);
				list_del(&sums->list);
				kfree(sums);
			}
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		BUG_ON(ret);
	}

	inode_add_bytes(inode, nbytes);
	btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		kfree(name);
		return -EIO;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	BUG_ON(ret);

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	BUG_ON(ret);
	kfree(name);

	iput(inode);

	btrfs_run_delayed_items(trans, root);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

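/*
 * Worked example (hypothetical numbers): for a back reference of inode
 * 257 in directory 256, the caller passes a key of
 *
 *	key->objectid = 257;	(the inode)
 *	key->type = BTRFS_INODE_REF_KEY;
 *	key->offset = 256;	(the parent directory)
 *
 * and backref_in_log() scans the names packed into that log item.
 */
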
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *dir;
	struct inode *inode;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name;
	int namelen;
	int ret;
	int search_done = 0;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, key->offset);
	if (!dir)
		return -ENOENT;

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		iput(dir);
		return -EIO;
	}

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

again:
	ref = (struct btrfs_inode_ref *)ref_ptr;

	namelen = btrfs_inode_ref_name_len(eb, ref);
	name = kmalloc(namelen, GFP_NOFS);
	BUG_ON(!name);

	read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);

	/* if we already have a perfect match, we're done */
	if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
			 btrfs_inode_ref_index(eb, ref),
			 name, namelen)) {
		goto out;
	}

	/*
	 * look for a conflicting back reference in the metadata.
	 * if we find one we have to unlink that name of the file
	 * before we add our new link.  Later on, we overwrite any
	 * existing back reference, and we don't want to create
	 * dangling pointers in the directory.
	 */

	if (search_done)
		goto insert;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		char *victim_name;
		int victim_name_len;
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;
		struct extent_buffer *leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (key->objectid == key->offset)
			goto out_nowrite;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			BUG_ON(!victim_name);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log, key, victim_name,
					    victim_name_len)) {
				btrfs_inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				btrfs_run_delayed_items(trans, root);
			}
			kfree(victim_name);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}
		BUG_ON(ret);

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 btrfs_inode_ref_index(eb, ref),
					 name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(path);

insert:
	/* insert our name */
	ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
			     btrfs_inode_ref_index(eb, ref));
	BUG_ON(ret);

	btrfs_update_inode(trans, root, inode);

out:
	ref_ptr = (unsigned long)(ref + 1) + namelen;
	kfree(name);
	if (ref_ptr < ref_end)
		goto again;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
	BUG_ON(ret);

out_nowrite:
	btrfs_release_path(path);
	iput(dir);
	iput(inode);
	return 0;
}

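/*
 * Layout reminder (illustrative): an inode ref item packs one or more
 * (struct btrfs_inode_ref + name bytes) pairs back to back, which is why
 * the loop above advances ref_ptr by sizeof(*ref) plus the name length
 * on every pass until it reaches ref_end.
 */
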
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	int ret;
	ret = btrfs_find_orphan_item(root, offset);
	if (ret > 0)
		ret = btrfs_insert_orphan_item(trans, root, offset);
	return ret;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	u64 nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			BUG_ON(ret);
		}
		ret = insert_orphan_item(trans, root, ino);
		BUG_ON(ret);
	}
	btrfs_free_path(path);

	return 0;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		BUG_ON(ret);

		iput(inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		btrfs_inc_nlink(inode);
		btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG();
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}
	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		BUG();
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	BUG_ON(ret);

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	return 0;

insert:
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);

	BUG_ON(ret && ret != -ENOENT);
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		BUG_ON(ret);
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	return 0;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

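/*
 * Illustrative example (hypothetical values): if the log holds a dir_log
 * item with key (dirid, BTRFS_DIR_LOG_INDEX_KEY, 2) whose
 * btrfs_dir_log_end() is 10, the log is authoritative for index offsets
 * 2 through 10 of that directory, and find_dir_range() returns that span
 * through *start_ret and *end_ret.
 */
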
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (IS_ERR_OR_NULL(log_di)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			BUG_ON(ret);
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			BUG_ON(ret);

			btrfs_run_delayed_items(trans, root);

			kfree(name);
			iput(inode);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			BUG_ON(ret);
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}

/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				BUG_ON(ret);
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);

			/* for regular files, make sure the corresponding
			 * orphan item exists.  Extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				BUG_ON(ret);
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			BUG_ON(ret);
		}
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			BUG_ON(ret);
		}
	}
	btrfs_free_path(path);
	return 0;
}

static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret)
				return ret;

			path->slots[*level]++;
			if (wc->free) {
				btrfs_read_buffer(next, ptr_gen);

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret);
			}
			free_extent_buffer(next);
			continue;
		}
		btrfs_read_buffer(next, ptr_gen);

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}

static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, log, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
								 next->len);
			BUG_ON(ret);
		}
	}

	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	}
	return ret;
}

static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}

static int wait_for_writer(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	DEFINE_WAIT(wait);
	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
	return 0;
}

/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	unsigned long log_transid = 0;

	mutex_lock(&root->log_mutex);
	index1 = root->log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(trans, root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);
	while (1) {
		unsigned long batch = root->log_batch;
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == root->log_batch)
			break;
	}

	/* bail out if we need to do a full commit */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	log_transid = root->log_transid;
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	BUG_ON(ret);

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_batch = 0;
	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	smp_mb();
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_batch++;
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		BUG_ON(ret != -ENOSPC);
		root->fs_info->last_trans_log_full_commit = trans->transid;
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = 0;
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
				&log_root_tree->dirty_log_pages,
				EXTENT_DIRTY | EXTENT_NEW);
	BUG_ON(ret);
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_batch = 0;
	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	btrfs_scrub_pause_super(root);
	write_ctree_super(trans, root->fs_info->tree_root, 1);
	btrfs_scrub_continue_super(root);
	ret = 0;

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}

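/*
 * Hedged caller sketch (simplified from the fsync path, which lives
 * outside this file): a zero return means the log made it to disk, any
 * other value means the caller must fall back to a full commit:
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret == 0)
 *		btrfs_end_transaction(trans, root);
 *	else
 *		btrfs_commit_transaction(trans, root);
 */
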
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	BUG_ON(ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}

/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	if (ret == -ENOSPC) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 0;
	}
	btrfs_end_log_trans(root);

	return err;
}

/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	if (ret == -ENOSPC) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 0;
	}
	btrfs_end_log_trans(root);

	return ret;
}

/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;
	max_key.objectid = ino;
	max_key.offset = (u64)-1;
	max_key.type = key_type;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &min_key, &max_key,
				   path, 0, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		WARN_ON(1);
		goto done;
	}

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}

 * logging directories is very similar to logging inodes.  We find all
 * the items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
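/*
 * Worked example (illustrative only; the exact boundaries depend on
 * what older items exist): if the first log_dir_items call comes back
 * with max_key == 25, the next call starts at min_key == 26, and the
 * loop ends when a call returns last_offset == (u64)-1, meaning the
 * log is now authoritative through the end of the keyspace.  The same
 * walk is then repeated for BTRFS_DIR_INDEX_KEY.
 */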
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0);
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		ret = btrfs_del_item(trans, log, path);
		if (ret)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *dst_path,
			       struct extent_buffer *src,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
				   src_offset, ins_sizes[i]);

		if (inode_only == LOG_INODE_EXISTS &&
		    ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);

			/* set the generation to zero so the recover code
			 * can tell the difference between logging just to
			 * say 'this inode exists' and logging to say
			 * 'update this inode with these values'
			 */
			btrfs_set_inode_generation(dst_path->nodes[0],
						   inode_item, 0);
		}
		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				BUG_ON(ret);
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}
	return ret;
}
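/*
 * Checksum lookup math above, by example: an uncompressed extent with
 * disk_bytenr ds == 1M, file extent offset cs == 4k and num_bytes
 * cl == 8k copies only the csums for [1M + 4k, 1M + 12k - 1].  For a
 * compressed extent the csums cover the whole on-disk extent, which is
 * why cs is reset to 0 and cl to disk_num_bytes.
 */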
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (!S_ISDIR(inode->i_mode))
		inode_only = LOG_INODE_ALL;

	if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	ret = btrfs_commit_inode_delayed_items(trans, inode);
	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}
	path->keep_locks = 1;

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, log, dst_path, src, ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, log, dst_path, src,
					 ins_start_slot,
					 ins_nr, inode_only);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
		}
		btrfs_release_path(path);

		if (min_key.offset < (u64)-1)
			min_key.offset++;
		else if (min_key.type < (u8)-1)
			min_key.type++;
		else if (min_key.objectid < (u64)-1)
			min_key.objectid++;
		else
			break;
	}
	if (ins_nr) {
		ret = copy_items(trans, log, dst_path, src,
				 ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 0;
	}
	WARN_ON(ins_nr);
	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}
	BTRFS_I(inode)->logged_trans = trans->transid;
out_unlock:
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
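/*
 * Batching above, by example: if btrfs_search_forward lands on slot 3
 * of a leaf and slots 3, 4 and 5 all belong to this inode, ins_nr
 * grows to 3 and a single copy_items call logs all three items at
 * once instead of doing one tree insertion per item.
 */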
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			root->fs_info->last_trans_log_full_commit =
				trans->transid;
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;
	}
	dput(old_parent);
out:
	return ret;
}
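/*
 * Example of the branch above: after "mv foo/bar/some_dir foo2/", the
 * rename code records this transid in foo/bar's last_unlink_trans.
 * Any fsync that walks up through foo/bar before the next commit then
 * pushes last_trans_log_full_commit forward and returns 1, so the
 * caller falls back to a full transaction commit.
 */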
static int inode_in_log(struct btrfs_trans_handle *trans,
			struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	mutex_lock(&root->log_mutex);
	if (BTRFS_I(inode)->logged_trans == trans->transid &&
	    BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
		ret = 1;
	mutex_unlock(&root->log_mutex);
	return ret;
}
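/*
 * Usage sketch, mirroring how btrfs_log_inode_parent below actually
 * uses this helper:
 *
 *	if (inode_in_log(trans, inode)) {
 *		ret = BTRFS_NO_LOG_SYNC;
 *		goto end_no_trans;
 *	}
 *
 * i.e. if the inode was logged in this transaction and nothing dirtied
 * it since the last log commit, the fsync path can skip writing the
 * log entirely.
 */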
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   struct dentry *parent, int exists_only)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (inode_in_log(trans, inode)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root);
	if (ret)
		goto end_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only);
	if (ret)
		goto end_trans;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		BUG_ON(ret != -ENOSPC);
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 1;
	}
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
 * it is not safe to log dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
	dput(parent);

	return ret;
}
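/*
 * Sketch of a caller, loosely modeled on the btrfs fsync path (the
 * real wiring lives in file.c and its error handling is richer than
 * shown here; btrfs_sync_log is defined earlier in this file):
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret == 0) {
 *		btrfs_sync_log(trans, root);
 *		btrfs_end_transaction(trans, root);
 *	} else if (ret != BTRFS_NO_LOG_SYNC) {
 *		btrfs_commit_transaction(trans, root);
 *	} else {
 *		btrfs_end_transaction(trans, root);
 *	}
 */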
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	BUG_ON(IS_ERR(trans));

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	BUG_ON(ret);

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root_no_radix(log_root_tree,
						  &found_key);
		BUG_ON(IS_ERR(log));

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);
		BUG_ON(ret);

		if (wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
			BUG_ON(ret);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;

	/* step 4: commit the transaction, which also unpins the blocks */
	btrfs_commit_transaction(trans, fs_info->tree_root);

	kfree(log_root_tree);
	return 0;
}
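/*
 * Replay pass order above, spelled out against the LOG_WALK_* stages
 * defined at the top of this file:
 *
 *	pass 1: wc.pin == 1, process_one_buffer, LOG_WALK_PIN_ONLY
 *	pass 2: replay_one_buffer, LOG_WALK_REPLAY_INODES
 *	pass 3: replay_one_buffer, LOG_WALK_REPLAY_ALL
 *
 * Each pass rescans every log root found under BTRFS_TREE_LOG_OBJECTID
 * before the final commit unpins the blocks.
 */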
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}
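/*
 * Concrete example: "mv foo/some_dir foo2/" reaches this function with
 * for_rename set, so foo's last_unlink_trans is recorded and a later
 * fsync involving foo forces the full commit described in the
 * directory trouble cases at the top of this file.  A plain unlink
 * (for_rename == 0) falls through to the no-recording return above.
 */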
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're
	 * renaming it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 1);
}
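/*
 * Usage sketch (hypothetical caller, modeled on the rename and link
 * paths; the surrounding error handling is omitted):
 *
 *	ret = btrfs_log_new_name(trans, inode, old_dir, parent);
 *	if (ret == 1)
 *		(the caller must do a full transaction commit before
 *		 the new name is safe against a crash)
 *
 * Passing 1 for exists_only means only enough of each inode is logged
 * to recreate it, per LOG_INODE_EXISTS.
 */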