/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "tree-log.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
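
/*
 * Illustrative sketch only (not part of the original file): how the two
 * modes above are passed to btrfs_log_inode(), whose prototype appears
 * below.  'trans', 'root' and 'inode' are assumed to come from the caller.
 */
#if 0
        /* log every item in the inode */
        ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
        /* log just enough for the inode to exist after replay */
        ret = btrfs_log_inode(trans, root, inode, LOG_INODE_EXISTS);
#endif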
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * rest.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2
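
/*
 * Illustrative sketch only (this block is not part of the original file,
 * and it is not the in-tree recovery path): the three stages above are
 * driven by walking the same log tree once per stage.  struct walk_control,
 * walk_log_tree(), process_one_buffer() and replay_one_buffer() are all
 * defined later in this file; 'trans', 'log' and 'dest' are assumed to
 * come from the caller.
 */
#if 0
static int replay_log_sketch(struct btrfs_trans_handle *trans,
                             struct btrfs_root *log,
                             struct btrfs_root *dest)
{
        struct walk_control wc = {
                .pin = 1,
                .trans = trans,
                .replay_dest = dest,
                .process_func = process_one_buffer,
                .stage = LOG_WALK_PIN_ONLY,
        };
        int ret;

        /* stage 0 only pins down the extents the log tree is using */
        ret = walk_log_tree(trans, log, &wc);

        /* stage 1 recreates the logged inodes, stage 2 does everything else */
        wc.process_func = replay_one_buffer;
        for (wc.stage = LOG_WALK_REPLAY_INODES;
             !ret && wc.stage <= LOG_WALK_REPLAY_ALL; wc.stage++)
                ret = walk_log_tree(trans, log, &wc);
        return ret;
}
#endif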
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct inode *inode,
                           int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
                                       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root)
{
        int ret;
        int err = 0;

        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                if (!root->log_start_pid) {
                        root->log_start_pid = current->pid;
                        root->log_multiple_pids = false;
                } else if (root->log_start_pid != current->pid) {
                        root->log_multiple_pids = true;
                }

                root->log_batch++;
                atomic_inc(&root->log_writers);
                mutex_unlock(&root->log_mutex);
                return 0;
        }
        root->log_multiple_pids = false;
        root->log_start_pid = current->pid;
        mutex_lock(&root->fs_info->tree_log_mutex);
        if (!root->fs_info->log_root_tree) {
                ret = btrfs_init_log_root_tree(trans, root->fs_info);
                if (ret)
                        err = ret;
        }
        if (err == 0 && !root->log_root) {
                ret = btrfs_add_log_tree(trans, root);
                if (ret)
                        err = ret;
        }
        mutex_unlock(&root->fs_info->tree_log_mutex);
        root->log_batch++;
        atomic_inc(&root->log_writers);
        mutex_unlock(&root->log_mutex);
        return err;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
        int ret = -ENOENT;

        smp_mb();
        if (!root->log_root)
                return -ENOENT;

        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
                atomic_inc(&root->log_writers);
        }
        mutex_unlock(&root->log_mutex);
        return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
        mutex_lock(&root->log_mutex);
        atomic_inc(&root->log_writers);
        mutex_unlock(&root->log_mutex);
        return 0;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
        if (atomic_dec_and_test(&root->log_writers)) {
                smp_mb();
                if (waitqueue_active(&root->log_writer_wait))
                        wake_up(&root->log_writer_wait);
        }
}
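
/*
 * Illustrative sketch only (not part of the original file): callers that
 * need the log transaction held across an operation pair the two helpers
 * above; the rename path is the classic user.  'root' and the work done
 * in between are assumed to come from the caller.
 */
#if 0
        btrfs_pin_log_trans(root);
        /* ... rename/unlink work that must not race with a log sync ... */
        btrfs_end_log_trans(root);
#endif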
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
        /* should we free the extent on disk when done?  This is used
         * at transaction commit time while freeing a log tree
         */
        int free;

        /* should we write out the extent buffer?  This is used
         * while flushing the log tree to disk during a sync
         */
        int write;

        /* should we wait for the extent buffer io to finish?  Also used
         * while flushing the log tree to disk for a sync
         */
        int wait;

        /* pin only walk, we record which extents on disk belong to the
         * log trees
         */
        int pin;

        /* what stage of the replay code we're currently in */
        int stage;

        /* the root we are currently replaying */
        struct btrfs_root *replay_dest;

        /* the trans handle for the current replay */
        struct btrfs_trans_handle *trans;

        /* the function that gets used to process blocks we find in the
         * tree.  Note the extent_buffer might not be up to date when it is
         * passed in, and it must be checked or read if you need the data
         * inside it
         */
        int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
                            struct walk_control *wc, u64 gen);
};
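
/*
 * Illustrative sketch only (not part of the original file, field values
 * assumed): a pin-only walk of a log tree would be configured like this
 * before being handed to walk_log_tree(), defined later in this file.
 */
#if 0
        struct walk_control wc = {
                .pin = 1,
                .trans = trans,         /* assumed replay transaction */
                .process_func = process_one_buffer,
        };
#endif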
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
                              struct extent_buffer *eb,
                              struct walk_control *wc, u64 gen)
{
        if (wc->pin)
                btrfs_pin_extent_for_log_replay(wc->trans,
                                                log->fs_info->extent_root,
                                                eb->start, eb->len);

        if (btrfs_buffer_uptodate(eb, gen, 0)) {
                if (wc->write)
                        btrfs_write_tree_block(eb);
                if (wc->wait)
                        btrfs_wait_tree_block_writeback(eb);
        }
        return 0;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   struct extent_buffer *eb, int slot,
                                   struct btrfs_key *key)
{
        int ret;
        u32 item_size;
        u64 saved_i_size = 0;
        int save_old_i_size = 0;
        unsigned long src_ptr;
        unsigned long dst_ptr;
        int overwrite_root = 0;

        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                overwrite_root = 1;

        item_size = btrfs_item_size_nr(eb, slot);
        src_ptr = btrfs_item_ptr_offset(eb, slot);
        /* look for the key in the destination tree */
        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret == 0) {
                char *src_copy;
                char *dst_copy;
                u32 dst_size = btrfs_item_size_nr(path->nodes[0],
                                                  path->slots[0]);
                if (dst_size != item_size)
                        goto insert;

                if (item_size == 0) {
                        btrfs_release_path(path);
                        return 0;
                }
                dst_copy = kmalloc(item_size, GFP_NOFS);
                src_copy = kmalloc(item_size, GFP_NOFS);
                if (!dst_copy || !src_copy) {
                        btrfs_release_path(path);
                        kfree(dst_copy);
                        kfree(src_copy);
                        return -ENOMEM;
                }

                read_extent_buffer(eb, src_copy, src_ptr, item_size);

                dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
                read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
                                   item_size);
                ret = memcmp(dst_copy, src_copy, item_size);

                kfree(dst_copy);
                kfree(src_copy);
                /*
                 * they have the same contents, just return, this saves
                 * us from cowing blocks in the destination tree and doing
                 * extra writes that may not have been done by a previous
                 * sync
                 */
                if (ret == 0) {
                        btrfs_release_path(path);
                        return 0;
                }
        }
insert:
        btrfs_release_path(path);
        /* try to insert the key into the destination tree */
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, item_size);

        /* make sure any existing item is the correct size */
        if (ret == -EEXIST) {
                u32 found_size;
                found_size = btrfs_item_size_nr(path->nodes[0],
                                                path->slots[0]);
                if (found_size > item_size)
                        btrfs_truncate_item(trans, root, path, item_size, 1);
                else if (found_size < item_size)
                        btrfs_extend_item(trans, root, path,
                                          item_size - found_size);
        }
        dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
                                        path->slots[0]);

        /* don't overwrite an existing inode if the generation number
         * was logged as zero.  This is done when the tree logging code
         * is just logging an inode to make sure it exists after recovery.
         *
         * Also, don't overwrite i_size on directories during replay.
         * log replay inserts and removes directory items based on the
         * state of the tree found in the subvolume, and i_size is modified
         * as it goes
         */
        if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
                struct btrfs_inode_item *src_item;
                struct btrfs_inode_item *dst_item;

                src_item = (struct btrfs_inode_item *)src_ptr;
                dst_item = (struct btrfs_inode_item *)dst_ptr;

                if (btrfs_inode_generation(eb, src_item) == 0)
                        goto no_copy;

                if (overwrite_root &&
                    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
                    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
                        save_old_i_size = 1;
                        saved_i_size = btrfs_inode_size(path->nodes[0],
                                                        dst_item);
                }
        }

        copy_extent_buffer(path->nodes[0], eb, dst_ptr,
                           src_ptr, item_size);

        if (save_old_i_size) {
                struct btrfs_inode_item *dst_item;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
                btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
        }

        /* make sure the generation is filled in */
        if (key->type == BTRFS_INODE_ITEM_KEY) {
                struct btrfs_inode_item *dst_item;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
                if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
                        btrfs_set_inode_generation(path->nodes[0], dst_item,
                                                   trans->transid);
                }
        }
no_copy:
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_release_path(path);
        return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
                                             u64 objectid)
{
        struct btrfs_key key;
        struct inode *inode;

        key.objectid = objectid;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
        if (IS_ERR(inode)) {
                inode = NULL;
        } else if (is_bad_inode(inode)) {
                iput(inode);
                inode = NULL;
        }
        return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct extent_buffer *eb, int slot,
                                      struct btrfs_key *key)
{
        int found_type;
        u64 mask = root->sectorsize - 1;
        u64 extent_end;
        u64 start = key->offset;
        u64 saved_nbytes;
        struct btrfs_file_extent_item *item;
        struct inode *inode = NULL;
        unsigned long size;
        int ret = 0;

        item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
        found_type = btrfs_file_extent_type(eb, item);

        if (found_type == BTRFS_FILE_EXTENT_REG ||
            found_type == BTRFS_FILE_EXTENT_PREALLOC)
                extent_end = start + btrfs_file_extent_num_bytes(eb, item);
        else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size = btrfs_file_extent_inline_len(eb, item);
                extent_end = (start + size + mask) & ~mask;
        } else {
                ret = 0;
                goto out;
        }

        inode = read_one_inode(root, key->objectid);
        if (!inode) {
                ret = -EIO;
                goto out;
        }

        /*
         * first check to see if we already have this extent in the
         * file.  This must be done before the btrfs_drop_extents run
         * so we don't try to drop this extent.
         */
        ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       start, 0);

        if (ret == 0 &&
            (found_type == BTRFS_FILE_EXTENT_REG ||
             found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
                struct btrfs_file_extent_item cmp1;
                struct btrfs_file_extent_item cmp2;
                struct btrfs_file_extent_item *existing;
                struct extent_buffer *leaf;

                leaf = path->nodes[0];
                existing = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_file_extent_item);

                read_extent_buffer(eb, &cmp1, (unsigned long)item,
                                   sizeof(cmp1));
                read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
                                   sizeof(cmp2));

                /*
                 * we already have a pointer to this exact extent,
                 * we don't have to do anything
                 */
                if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
                        btrfs_release_path(path);
                        goto out;
                }
        }
        btrfs_release_path(path);

        saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
        ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
        BUG_ON(ret);

        if (found_type == BTRFS_FILE_EXTENT_REG ||
            found_type == BTRFS_FILE_EXTENT_PREALLOC) {
                u64 offset;
                unsigned long dest_offset;
                struct btrfs_key ins;

                ret = btrfs_insert_empty_item(trans, root, path, key,
                                              sizeof(*item));
                BUG_ON(ret);
                dest_offset = btrfs_item_ptr_offset(path->nodes[0],
                                                    path->slots[0]);
                copy_extent_buffer(path->nodes[0], eb, dest_offset,
                                   (unsigned long)item, sizeof(*item));

                ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
                ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
                ins.type = BTRFS_EXTENT_ITEM_KEY;
                offset = key->offset - btrfs_file_extent_offset(eb, item);

                if (ins.objectid > 0) {
                        u64 csum_start;
                        u64 csum_end;
                        LIST_HEAD(ordered_sums);
                        /*
                         * is this extent already allocated in the extent
                         * allocation tree?  If so, just add a reference
                         */
                        ret = btrfs_lookup_extent(root, ins.objectid,
                                                  ins.offset);
                        if (ret == 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                ins.objectid, ins.offset,
                                                0, root->root_key.objectid,
                                                key->objectid, offset, 0);
                                BUG_ON(ret);
                        } else {
                                /*
                                 * insert the extent pointer in the extent
                                 * allocation tree
                                 */
                                ret = btrfs_alloc_logged_file_extent(trans,
                                                root, root->root_key.objectid,
                                                key->objectid, offset, &ins);
                                BUG_ON(ret);
                        }
                        btrfs_release_path(path);

                        if (btrfs_file_extent_compression(eb, item)) {
                                csum_start = ins.objectid;
                                csum_end = csum_start + ins.offset;
                        } else {
                                csum_start = ins.objectid +
                                        btrfs_file_extent_offset(eb, item);
                                csum_end = csum_start +
                                        btrfs_file_extent_num_bytes(eb, item);
                        }

                        ret = btrfs_lookup_csums_range(root->log_root,
                                                csum_start, csum_end - 1,
                                                &ordered_sums, 0);
                        BUG_ON(ret);
                        while (!list_empty(&ordered_sums)) {
                                struct btrfs_ordered_sum *sums;
                                sums = list_entry(ordered_sums.next,
                                                struct btrfs_ordered_sum,
                                                list);
                                ret = btrfs_csum_file_blocks(trans,
                                                root->fs_info->csum_root,
                                                sums);
                                BUG_ON(ret);
                                list_del(&sums->list);
                                kfree(sums);
                        }
                } else {
                        btrfs_release_path(path);
                }
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                /* inline extents are easy, we just overwrite them */
                ret = overwrite_item(trans, root, path, eb, slot, key);
                BUG_ON(ret);
        }

        inode_set_bytes(inode, saved_nbytes);
        ret = btrfs_update_inode(trans, root, inode);
out:
        if (inode)
                iput(inode);
        return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct inode *dir,
                                      struct btrfs_dir_item *di)
{
        struct inode *inode;
        char *name;
        int name_len;
        struct extent_buffer *leaf;
        struct btrfs_key location;
        int ret;

        leaf = path->nodes[0];

        btrfs_dir_item_key_to_cpu(leaf, di, &location);
        name_len = btrfs_dir_name_len(leaf, di);
        name = kmalloc(name_len, GFP_NOFS);
        if (!name)
                return -ENOMEM;

        read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
        btrfs_release_path(path);

        inode = read_one_inode(root, location.objectid);
        if (!inode) {
                kfree(name);
                return -EIO;
        }

        ret = link_to_fixup_dir(trans, root, path, location.objectid);
        BUG_ON(ret);

        ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
        BUG_ON(ret);
        kfree(name);
        iput(inode);

        btrfs_run_delayed_items(trans, root);
        return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 dirid, u64 objectid, u64 index,
                                 const char *name, int name_len)
{
        struct btrfs_dir_item *di;
        struct btrfs_key location;
        int match = 0;

        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, name_len, 0);
        if (di && !IS_ERR(di)) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
        } else
                goto out;
        btrfs_release_path(path);

        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
        if (di && !IS_ERR(di)) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
        } else
                goto out;
        match = 1;
out:
        btrfs_release_path(path);
        return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
                                   struct btrfs_key *key,
                                   char *name, int namelen)
{
        struct btrfs_path *path;
        struct btrfs_inode_ref *ref;
        unsigned long ptr;
        unsigned long ptr_end;
        unsigned long name_ptr;
        int found_name_len;
        int item_size;
        int ret;
        int match = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
        if (ret != 0)
                goto out;

        item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
        ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                ref = (struct btrfs_inode_ref *)ptr;
                found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
                if (found_name_len == namelen) {
                        name_ptr = (unsigned long)(ref + 1);
                        ret = memcmp_extent_buffer(path->nodes[0], name,
                                                   name_ptr, namelen);
                        if (ret == 0) {
                                match = 1;
                                goto out;
                        }
                }
                ptr = (unsigned long)(ref + 1) + found_name_len;
        }
out:
        btrfs_free_path(path);
        return match;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_root *log,
                                  struct btrfs_path *path,
                                  struct extent_buffer *eb, int slot,
                                  struct btrfs_key *key)
{
        struct btrfs_inode_ref *ref;
        struct btrfs_dir_item *di;
        struct inode *dir;
        struct inode *inode;
        unsigned long ref_ptr;
        unsigned long ref_end;
        char *name;
        int namelen;
        int ret;
        int search_done = 0;

        /*
         * it is possible that we didn't log all the parent directories
         * for a given inode.  If we don't find the dir, just don't
         * copy the back ref in.  The link count fixup code will take
         * care of the rest
         */
        dir = read_one_inode(root, key->offset);
        if (!dir)
                return -ENOENT;

        inode = read_one_inode(root, key->objectid);
        if (!inode) {
                iput(dir);
                return -EIO;
        }

        ref_ptr = btrfs_item_ptr_offset(eb, slot);
        ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

again:
        ref = (struct btrfs_inode_ref *)ref_ptr;

        namelen = btrfs_inode_ref_name_len(eb, ref);
        name = kmalloc(namelen, GFP_NOFS);
        BUG_ON(!name);

        read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);

        /* if we already have a perfect match, we're done */
        if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
                         btrfs_inode_ref_index(eb, ref),
                         name, namelen))
                goto out;

        /*
         * look for a conflicting back reference in the metadata.
         * if we find one we have to unlink that name of the file
         * before we add our new link.  Later on, we overwrite any
         * existing back reference, and we don't want to create
         * dangling pointers in the directory.
         */

        if (search_done)
                goto insert;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret == 0) {
                char *victim_name;
                int victim_name_len;
                struct btrfs_inode_ref *victim_ref;
                unsigned long ptr;
                unsigned long ptr_end;
                struct extent_buffer *leaf = path->nodes[0];

                /* are we trying to overwrite a back ref for the root directory
                 * if so, just jump out, we're done
                 */
                if (key->objectid == key->offset)
                        goto out_nowrite;

                /* check all the names in this back reference to see
                 * if they are in the log.  if so, we allow them to stay
                 * otherwise they must be unlinked as a conflict
                 */
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
                while (ptr < ptr_end) {
                        victim_ref = (struct btrfs_inode_ref *)ptr;
                        victim_name_len = btrfs_inode_ref_name_len(leaf,
                                                                   victim_ref);
                        victim_name = kmalloc(victim_name_len, GFP_NOFS);
                        BUG_ON(!victim_name);

                        read_extent_buffer(leaf, victim_name,
                                           (unsigned long)(victim_ref + 1),
                                           victim_name_len);

                        if (!backref_in_log(log, key, victim_name,
                                            victim_name_len)) {
                                btrfs_inc_nlink(inode);
                                btrfs_release_path(path);

                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
                                btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
                }

                /*
                 * NOTE: we have already searched the root tree and checked
                 * the corresponding ref, so it does not need to be checked
                 * again.
                 */
                search_done = 1;
        }
        btrfs_release_path(path);

        /* look for a conflicting sequence number */
        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
                                         btrfs_inode_ref_index(eb, ref),
                                         name, namelen, 0);
        if (di && !IS_ERR(di)) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                BUG_ON(ret);
        }
        btrfs_release_path(path);

        /* look for a conflicting name */
        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
                                   name, namelen, 0);
        if (di && !IS_ERR(di)) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                BUG_ON(ret);
        }
        btrfs_release_path(path);

insert:
        /* insert our name */
        ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
                             btrfs_inode_ref_index(eb, ref));
        BUG_ON(ret);

        btrfs_update_inode(trans, root, inode);

out:
        ref_ptr = (unsigned long)(ref + 1) + namelen;
        kfree(name);
        if (ref_ptr < ref_end)
                goto again;

        /* finally write the back reference in the inode */
        ret = overwrite_item(trans, root, path, eb, slot, key);
        BUG_ON(ret);

out_nowrite:
        btrfs_release_path(path);
        iput(dir);
        iput(inode);
        return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 offset)
{
        int ret;
        ret = btrfs_find_orphan_item(root, offset);
        if (ret > 0)
                ret = btrfs_insert_orphan_item(trans, root, offset);
        return ret;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct inode *inode)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        u64 nlink = 0;
        unsigned long ptr;
        unsigned long ptr_end;
        int name_len;
        u64 ino = btrfs_ino(inode);

        key.objectid = ino;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        break;
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                      path->slots[0]);
                if (key.objectid != ino ||
                    key.type != BTRFS_INODE_REF_KEY)
                        break;
                ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
                                                   path->slots[0]);
                while (ptr < ptr_end) {
                        struct btrfs_inode_ref *ref;

                        ref = (struct btrfs_inode_ref *)ptr;
                        name_len = btrfs_inode_ref_name_len(path->nodes[0],
                                                            ref);
                        ptr = (unsigned long)(ref + 1) + name_len;
                        nlink++;
                }

                if (key.offset == 0)
                        break;
                key.offset--;
                btrfs_release_path(path);
        }
        btrfs_release_path(path);
        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
                btrfs_update_inode(trans, root, inode);
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;

        if (inode->i_nlink == 0) {
                if (S_ISDIR(inode->i_mode)) {
                        ret = replay_dir_deletes(trans, root, NULL, path,
                                                 ino, 1);
                        BUG_ON(ret);
                }
                ret = insert_orphan_item(trans, root, ino);
        }
        btrfs_free_path(path);

        return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            struct btrfs_path *path)
{
        int ret;
        struct btrfs_key key;
        struct inode *inode;

        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = (u64)-1;
        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        break;

                if (ret == 1) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                }

                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
                    key.type != BTRFS_ORPHAN_ITEM_KEY)
                        break;

                ret = btrfs_del_item(trans, root, path);
                if (ret)
                        goto out;

                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
                if (!inode)
                        return -EIO;

                ret = fixup_inode_link_count(trans, root, inode);
                BUG_ON(ret);

                iput(inode);

                /*
                 * fixup on a directory may create new entries,
                 * make sure we always look for the highest possible
                 * offset
                 */
                key.offset = (u64)-1;
        }
        ret = 0;
out:
        btrfs_release_path(path);
        return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      u64 objectid)
{
        struct btrfs_key key;
        int ret = 0;
        struct inode *inode;

        inode = read_one_inode(root, objectid);
        if (!inode)
                return -EIO;

        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
        key.offset = objectid;

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

        btrfs_release_path(path);
        if (ret == 0) {
                btrfs_inc_nlink(inode);
                ret = btrfs_update_inode(trans, root, inode);
        } else if (ret == -EEXIST) {
                ret = 0;
        }
        iput(inode);

        return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    u64 dirid, u64 index,
                                    char *name, int name_len, u8 type,
                                    struct btrfs_key *location)
{
        struct inode *inode;
        struct inode *dir;
        int ret;

        inode = read_one_inode(root, location->objectid);
        if (!inode)
                return -ENOENT;

        dir = read_one_inode(root, dirid);
        if (!dir) {
                iput(inode);
                return -EIO;
        }

        ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

        /* FIXME, put inode into FIXUP list */

        iput(inode);
        iput(dir);
        return ret;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct extent_buffer *eb,
                                    struct btrfs_dir_item *di,
                                    struct btrfs_key *key)
{
        char *name;
        int name_len;
        struct btrfs_dir_item *dst_di;
        struct btrfs_key found_key;
        struct btrfs_key log_key;
        struct inode *dir;
        u8 log_type;
        int exists;
        int ret;

        dir = read_one_inode(root, key->objectid);
        if (!dir)
                return -EIO;

        name_len = btrfs_dir_name_len(eb, di);
        name = kmalloc(name_len, GFP_NOFS);
        if (!name)
                return -ENOMEM;

        log_type = btrfs_dir_type(eb, di);
        read_extent_buffer(eb, name, (unsigned long)(di + 1),
                           name_len);

        btrfs_dir_item_key_to_cpu(eb, di, &log_key);
        exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
        if (exists == 0)
                exists = 1;
        else
                exists = 0;
        btrfs_release_path(path);

        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
                                               name, name_len, 1);
        } else if (key->type == BTRFS_DIR_INDEX_KEY) {
                dst_di = btrfs_lookup_dir_index_item(trans, root, path,
                                                     key->objectid,
                                                     key->offset, name,
                                                     name_len, 1);
        } else {
                BUG();
        }
        if (IS_ERR_OR_NULL(dst_di)) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
                if (key->type != BTRFS_DIR_INDEX_KEY)
                        goto out;
                goto insert;
        }

        btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
        /* the existing item matches the logged item */
        if (found_key.objectid == log_key.objectid &&
            found_key.type == log_key.type &&
            found_key.offset == log_key.offset &&
            btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
                goto out;
        }

        /*
         * don't drop the conflicting directory entry if the inode
         * for the new entry doesn't exist
         */
        if (!exists)
                goto out;

        ret = drop_one_dir_item(trans, root, path, dir, dst_di);
        BUG_ON(ret);

        if (key->type == BTRFS_DIR_INDEX_KEY)
                goto insert;
out:
        btrfs_release_path(path);
        kfree(name);
        iput(dir);
        return 0;

insert:
        btrfs_release_path(path);
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);

        BUG_ON(ret && ret != -ENOENT);
        goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct extent_buffer *eb, int slot,
                                        struct btrfs_key *key)
{
        int ret;
        u32 item_size = btrfs_item_size_nr(eb, slot);
        struct btrfs_dir_item *di;
        int name_len;
        unsigned long ptr;
        unsigned long ptr_end;

        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                if (verify_dir_item(root, eb, di))
                        return -EIO;
                name_len = btrfs_dir_name_len(eb, di);
                ret = replay_one_name(trans, root, path, eb, di, key);
                BUG_ON(ret);
                ptr = (unsigned long)(di + 1);
                ptr += name_len;
        }
        return 0;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   u64 dirid, int key_type,
                                   u64 *start_ret, u64 *end_ret)
{
        struct btrfs_key key;
        u64 found_end;
        struct btrfs_dir_log_item *item;
        int ret;
        int nritems;

        if (*start_ret == (u64)-1)
                return 1;

        key.objectid = dirid;
        key.type = key_type;
        key.offset = *start_ret;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto out;
                path->slots[0]--;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type != key_type || key.objectid != dirid) {
                ret = 1;
                goto next;
        }
        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_dir_log_item);
        found_end = btrfs_dir_log_end(path->nodes[0], item);

        if (*start_ret >= key.offset && *start_ret <= found_end) {
                ret = 0;
                *start_ret = key.offset;
                *end_ret = found_end;
                goto out;
        }
        ret = 1;
next:
        /* check the next slot in the tree to see if it is a valid item */
        nritems = btrfs_header_nritems(path->nodes[0]);
        if (path->slots[0] >= nritems) {
                ret = btrfs_next_leaf(root, path);
                if (ret)
                        goto out;
        } else {
                path->slots[0]++;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type != key_type || key.objectid != dirid) {
                ret = 1;
                goto out;
        }
        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_dir_log_item);
        found_end = btrfs_dir_log_end(path->nodes[0], item);
        *start_ret = key.offset;
        *end_ret = found_end;
        ret = 0;
out:
        btrfs_release_path(path);
        return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_root *log,
                                      struct btrfs_path *path,
                                      struct btrfs_path *log_path,
                                      struct inode *dir,
                                      struct btrfs_key *dir_key)
{
        int ret;
        struct extent_buffer *eb;
        int slot;
        u32 item_size;
        struct btrfs_dir_item *di;
        struct btrfs_dir_item *log_di;
        int name_len;
        unsigned long ptr;
        unsigned long ptr_end;
        char *name;
        struct inode *inode;
        struct btrfs_key location;

again:
        eb = path->nodes[0];
        slot = path->slots[0];
        item_size = btrfs_item_size_nr(eb, slot);
        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                if (verify_dir_item(root, eb, di)) {
                        ret = -EIO;
                        goto out;
                }

                name_len = btrfs_dir_name_len(eb, di);
                name = kmalloc(name_len, GFP_NOFS);
                if (!name) {
                        ret = -ENOMEM;
                        goto out;
                }
                read_extent_buffer(eb, name, (unsigned long)(di + 1),
                                   name_len);

                if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
                        log_di = btrfs_lookup_dir_item(trans, log, log_path,
                                                       dir_key->objectid,
                                                       name, name_len, 0);
                } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
                        log_di = btrfs_lookup_dir_index_item(trans, log,
                                                     log_path,
                                                     dir_key->objectid,
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
                if (IS_ERR_OR_NULL(log_di)) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
                        btrfs_release_path(path);
                        btrfs_release_path(log_path);
                        inode = read_one_inode(root, location.objectid);
                        if (!inode) {
                                kfree(name);
                                return -EIO;
                        }

                        ret = link_to_fixup_dir(trans, root,
                                                path, location.objectid);
                        BUG_ON(ret);
                        btrfs_inc_nlink(inode);
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);

                        btrfs_run_delayed_items(trans, root);

                        kfree(name);
                        iput(inode);

                        /* there might still be more names under this key
                         * check and repeat if required
                         */
                        ret = btrfs_search_slot(NULL, root, dir_key, path,
                                                0, 0);
                        if (ret == 0)
                                goto again;
                        ret = 0;
                        goto out;
                }
                btrfs_release_path(log_path);
                kfree(name);

                ptr = (unsigned long)(di + 1);
                ptr += name_len;
        }
        ret = 0;
out:
        btrfs_release_path(path);
        btrfs_release_path(log_path);
        return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * subvolume.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
                                       u64 dirid, int del_all)
{
        u64 range_start;
        u64 range_end;
        int key_type = BTRFS_DIR_LOG_ITEM_KEY;
        int ret = 0;
        struct btrfs_key dir_key;
        struct btrfs_key found_key;
        struct btrfs_path *log_path;
        struct inode *dir;

        dir_key.objectid = dirid;
        dir_key.type = BTRFS_DIR_ITEM_KEY;
        log_path = btrfs_alloc_path();
        if (!log_path)
                return -ENOMEM;

        dir = read_one_inode(root, dirid);
        /* it isn't an error if the inode isn't there, that can happen
         * because we replay the deletes before we copy in the inode item
         * from the subvolume
         */
        if (!dir) {
                btrfs_free_path(log_path);
                return 0;
        }
again:
        range_start = 0;
        while (1) {
                if (del_all)
                        range_end = (u64)-1;
                else {
                        ret = find_dir_range(log, path, dirid, key_type,
                                             &range_start, &range_end);
                        if (ret != 0)
                                break;
                }

                dir_key.offset = range_start;
                while (1) {
                        int nritems;
                        ret = btrfs_search_slot(NULL, root, &dir_key, path,
                                                0, 0);
                        if (ret < 0)
                                goto out;

                        nritems = btrfs_header_nritems(path->nodes[0]);
                        if (path->slots[0] >= nritems) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret)
                                        break;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);
                        if (found_key.objectid != dirid ||
                            found_key.type != dir_key.type)
                                goto next_type;

                        if (found_key.offset > range_end)
                                break;

                        ret = check_item_in_log(trans, root, log, path,
                                                log_path, dir,
                                                &found_key);
                        BUG_ON(ret);
                        if (found_key.offset == (u64)-1)
                                break;
                        dir_key.offset = found_key.offset + 1;
                }
                btrfs_release_path(path);
                if (range_end == (u64)-1)
                        break;
                range_start = range_end + 1;
        }

next_type:
        ret = 0;
        if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
                key_type = BTRFS_DIR_LOG_INDEX_KEY;
                dir_key.type = BTRFS_DIR_INDEX_KEY;
                btrfs_release_path(path);
                goto again;
        }
out:
        btrfs_release_path(path);
        btrfs_free_path(log_path);
        iput(dir);
        return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                             struct walk_control *wc, u64 gen)
{
        int nritems;
        struct btrfs_path *path;
        struct btrfs_root *root = wc->replay_dest;
        struct btrfs_key key;
        int level;
        int i;
        int ret;

        ret = btrfs_read_buffer(eb, gen);
        if (ret)
                return ret;

        level = btrfs_header_level(eb);

        if (level != 0)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        nritems = btrfs_header_nritems(eb);
        for (i = 0; i < nritems; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);

                /* inode keys are done during the first stage */
                if (key.type == BTRFS_INODE_ITEM_KEY &&
                    wc->stage == LOG_WALK_REPLAY_INODES) {
                        struct btrfs_inode_item *inode_item;
                        u32 mode;

                        inode_item = btrfs_item_ptr(eb, i,
                                            struct btrfs_inode_item);
                        mode = btrfs_inode_mode(eb, inode_item);
                        if (S_ISDIR(mode)) {
                                ret = replay_dir_deletes(wc->trans,
                                         root, log, path, key.objectid, 0);
                                BUG_ON(ret);
                        }
                        ret = overwrite_item(wc->trans, root, path,
                                             eb, i, &key);
                        BUG_ON(ret);

                        /* for regular files, make sure the corresponding
                         * orphan item exists.  extents past the new EOF
                         * will be truncated later by orphan cleanup.
                         */
                        if (S_ISREG(mode)) {
                                ret = insert_orphan_item(wc->trans, root,
                                                         key.objectid);
                                BUG_ON(ret);
                        }

                        ret = link_to_fixup_dir(wc->trans, root,
                                                path, key.objectid);
                        BUG_ON(ret);
                }
                if (wc->stage < LOG_WALK_REPLAY_ALL)
                        continue;

                /* these keys are simply copied */
                if (key.type == BTRFS_XATTR_ITEM_KEY) {
                        ret = overwrite_item(wc->trans, root, path,
                                             eb, i, &key);
                        BUG_ON(ret);
                } else if (key.type == BTRFS_INODE_REF_KEY) {
                        ret = add_inode_ref(wc->trans, root, log, path,
                                            eb, i, &key);
                        BUG_ON(ret && ret != -ENOENT);
                } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
                        ret = replay_one_extent(wc->trans, root, path,
                                                eb, i, &key);
                        BUG_ON(ret);
                } else if (key.type == BTRFS_DIR_ITEM_KEY ||
                           key.type == BTRFS_DIR_INDEX_KEY) {
                        ret = replay_one_dir_item(wc->trans, root, path,
                                                  eb, i, &key);
                        BUG_ON(ret);
                }
        }
        btrfs_free_path(path);
        return 0;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_path *path, int *level,
                                       struct walk_control *wc)
{
        u64 root_owner;
        u64 bytenr;
        u64 ptr_gen;
        struct extent_buffer *next;
        struct extent_buffer *cur;
        struct extent_buffer *parent;
        u32 blocksize;
        int ret = 0;

        WARN_ON(*level < 0);
        WARN_ON(*level >= BTRFS_MAX_LEVEL);

        while (*level > 0) {
                WARN_ON(*level < 0);
                WARN_ON(*level >= BTRFS_MAX_LEVEL);
                cur = path->nodes[*level];

                if (btrfs_header_level(cur) != *level)
                        WARN_ON(1);

                if (path->slots[*level] >=
                    btrfs_header_nritems(cur))
                        break;

                bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
                ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
                blocksize = btrfs_level_size(root, *level - 1);

                parent = path->nodes[*level];
                root_owner = btrfs_header_owner(parent);

                next = btrfs_find_create_tree_block(root, bytenr, blocksize);
                if (!next)
                        return -ENOMEM;

                if (*level == 1) {
                        ret = wc->process_func(root, next, wc, ptr_gen);
                        if (ret)
                                return ret;

                        path->slots[*level]++;
                        if (wc->free) {
                                ret = btrfs_read_buffer(next, ptr_gen);
                                if (ret) {
                                        free_extent_buffer(next);
                                        return ret;
                                }

                                btrfs_tree_lock(next);
                                btrfs_set_lock_blocking(next);
                                clean_tree_block(trans, root, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);

                                WARN_ON(root_owner !=
                                        BTRFS_TREE_LOG_OBJECTID);
                                ret = btrfs_free_and_pin_reserved_extent(root,
                                                        bytenr, blocksize);
                                BUG_ON(ret); /* -ENOMEM or logic errors */
                        }
                        free_extent_buffer(next);
                        continue;
                }
                ret = btrfs_read_buffer(next, ptr_gen);
                if (ret) {
                        free_extent_buffer(next);
                        return ret;
                }

                WARN_ON(*level <= 0);
                if (path->nodes[*level-1])
                        free_extent_buffer(path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(next);
                path->slots[*level] = 0;
        }
        WARN_ON(*level < 0);
        WARN_ON(*level >= BTRFS_MAX_LEVEL);

        path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

        cond_resched();
        return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path, int *level,
                                     struct walk_control *wc)
{
        u64 root_owner;
        int i;
        int slot;
        int ret;

        for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
                        path->slots[i]++;
                        *level = i;
                        WARN_ON(*level == 0);
                        return 0;
                } else {
                        struct extent_buffer *parent;
                        if (path->nodes[*level] == root->node)
                                parent = path->nodes[*level];
                        else
                                parent = path->nodes[*level + 1];

                        root_owner = btrfs_header_owner(parent);
                        ret = wc->process_func(root, path->nodes[*level], wc,
                                 btrfs_header_generation(path->nodes[*level]));
                        if (ret)
                                return ret;

                        if (wc->free) {
                                struct extent_buffer *next;

                                next = path->nodes[*level];

                                btrfs_tree_lock(next);
                                btrfs_set_lock_blocking(next);
                                clean_tree_block(trans, root, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);

                                WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
                                ret = btrfs_free_and_pin_reserved_extent(root,
                                                path->nodes[*level]->start,
                                                path->nodes[*level]->len);
                                BUG_ON(ret);
                        }
                        free_extent_buffer(path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                }
        }
        return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
                         struct btrfs_root *log, struct walk_control *wc)
{
        int ret = 0;
        int wret;
        int level;
        struct btrfs_path *path;
        int i;
        int orig_level;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        level = btrfs_header_level(log->node);
        orig_level = level;
        path->nodes[level] = log->node;
        extent_buffer_get(log->node);
        path->slots[level] = 0;

        while (1) {
                wret = walk_down_log_tree(trans, log, path, &level, wc);
                if (wret > 0)
                        break;
                if (wret < 0) {
                        ret = wret;
                        goto out;
                }

                wret = walk_up_log_tree(trans, log, path, &level, wc);
                if (wret > 0)
                        break;
                if (wret < 0) {
                        ret = wret;
                        goto out;
                }
        }

        /* was the root node processed? if not, catch it here */
        if (path->nodes[orig_level]) {
                ret = wc->process_func(log, path->nodes[orig_level], wc,
                         btrfs_header_generation(path->nodes[orig_level]));
                if (ret)
                        goto out;
                if (wc->free) {
                        struct extent_buffer *next;

                        next = path->nodes[orig_level];

                        btrfs_tree_lock(next);
                        btrfs_set_lock_blocking(next);
                        clean_tree_block(trans, log, next);
                        btrfs_wait_tree_block_writeback(next);
                        btrfs_tree_unlock(next);

                        WARN_ON(log->root_key.objectid !=
                                BTRFS_TREE_LOG_OBJECTID);
                        ret = btrfs_free_and_pin_reserved_extent(log, next->start,
                                                                 next->len);
                        BUG_ON(ret); /* -ENOMEM or logic errors */
                }
        }

out:
        for (i = 0; i <= orig_level; i++) {
                if (path->nodes[i]) {
                        free_extent_buffer(path->nodes[i]);
                        path->nodes[i] = NULL;
                }
        }
        btrfs_free_path(path);
        return ret;
}
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
                           struct btrfs_root *log)
{
        int ret;

        if (log->log_transid == 1) {
                /* insert root item on the first sync */
                ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
                                &log->root_key, &log->root_item);
        } else {
                ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
                                &log->root_key, &log->root_item);
        }
        return ret;
}
static int wait_log_commit(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long transid)
{
        DEFINE_WAIT(wait);
        int index = transid % 2;

        /*
         * we only allow two pending log transactions at a time,
         * so we know that if ours is more than 2 older than the
         * current transaction, we're done
         */
        do {
                prepare_to_wait(&root->log_commit_wait[index],
                                &wait, TASK_UNINTERRUPTIBLE);
                mutex_unlock(&root->log_mutex);

                if (root->fs_info->last_trans_log_full_commit !=
                    trans->transid && root->log_transid < transid + 2 &&
                    atomic_read(&root->log_commit[index]))
                        schedule();

                finish_wait(&root->log_commit_wait[index], &wait);
                mutex_lock(&root->log_mutex);
        } while (root->fs_info->last_trans_log_full_commit !=
                 trans->transid && root->log_transid < transid + 2 &&
                 atomic_read(&root->log_commit[index]));
        return 0;
}
static void wait_for_writer(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        DEFINE_WAIT(wait);
        while (root->fs_info->last_trans_log_full_commit !=
               trans->transid && atomic_read(&root->log_writers)) {
                prepare_to_wait(&root->log_writer_wait,
                                &wait, TASK_UNINTERRUPTIBLE);
                mutex_unlock(&root->log_mutex);
                if (root->fs_info->last_trans_log_full_commit !=
                    trans->transid && atomic_read(&root->log_writers))
                        schedule();
                mutex_lock(&root->log_mutex);
                finish_wait(&root->log_writer_wait, &wait);
        }
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root)
{
        int index1;
        int index2;
        int mark;
        int ret;
        struct btrfs_root *log = root->log_root;
        struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
        unsigned long log_transid = 0;

        mutex_lock(&root->log_mutex);
        index1 = root->log_transid % 2;
        if (atomic_read(&root->log_commit[index1])) {
                wait_log_commit(trans, root, root->log_transid);
                mutex_unlock(&root->log_mutex);
                return 0;
        }
        atomic_set(&root->log_commit[index1], 1);

        /* wait for previous tree log sync to complete */
        if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
                wait_log_commit(trans, root, root->log_transid - 1);

        while (1) {
                unsigned long batch = root->log_batch;
                /* when we're on an ssd, just kick the log commit out */
                if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
                        mutex_unlock(&root->log_mutex);
                        schedule_timeout_uninterruptible(1);
                        mutex_lock(&root->log_mutex);
                }
                wait_for_writer(trans, root);
                if (batch == root->log_batch)
                        break;
        }

        /* bail out if we need to do a full commit */
        if (root->fs_info->last_trans_log_full_commit == trans->transid) {
                ret = -EAGAIN;
                mutex_unlock(&root->log_mutex);
                goto out;
        }

        log_transid = root->log_transid;
        if (log_transid % 2 == 0)
                mark = EXTENT_DIRTY;
        else
                mark = EXTENT_NEW;

        /* we start IO on all the marked extents here, but we don't actually
         * wait for them until later.
         */
        ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                mutex_unlock(&root->log_mutex);
                goto out;
        }

        btrfs_set_root_node(&log->root_item, log->node);

        root->log_batch = 0;
        root->log_transid++;
        log->log_transid = root->log_transid;
        root->log_start_pid = 0;
        smp_mb();
        /*
         * IO has been started, blocks of the log tree have WRITTEN flag set
         * in their headers.  New modifications of the log will be written to
         * new positions.  So it's safe to allow log writers to go in.
         */
        mutex_unlock(&root->log_mutex);

        mutex_lock(&log_root_tree->log_mutex);
        log_root_tree->log_batch++;
        atomic_inc(&log_root_tree->log_writers);
        mutex_unlock(&log_root_tree->log_mutex);

        ret = update_log_root(trans, log);

        mutex_lock(&log_root_tree->log_mutex);
        if (atomic_dec_and_test(&log_root_tree->log_writers)) {
                smp_mb();
                if (waitqueue_active(&log_root_tree->log_writer_wait))
                        wake_up(&log_root_tree->log_writer_wait);
        }

        if (ret) {
                if (ret != -ENOSPC) {
                        btrfs_abort_transaction(trans, root, ret);
                        mutex_unlock(&log_root_tree->log_mutex);
                        goto out;
                }
                root->fs_info->last_trans_log_full_commit = trans->transid;
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = -EAGAIN;
                goto out;
        }
        index2 = log_root_tree->log_transid % 2;
        if (atomic_read(&log_root_tree->log_commit[index2])) {
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
                wait_log_commit(trans, log_root_tree,
                                log_root_tree->log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = 0;
                goto out;
        }
        atomic_set(&log_root_tree->log_commit[index2], 1);

        if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
                wait_log_commit(trans, log_root_tree,
                                log_root_tree->log_transid - 1);
        }

        wait_for_writer(trans, log_root_tree);

        /*
         * now that we've moved on to the tree of log tree roots,
         * check the full commit flag again
         */
        if (root->fs_info->last_trans_log_full_commit == trans->transid) {
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = -EAGAIN;
                goto out_wake_log_root;
        }

        ret = btrfs_write_and_wait_marked_extents(log_root_tree,
                                &log_root_tree->dirty_log_pages,
                                EXTENT_DIRTY | EXTENT_NEW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                mutex_unlock(&log_root_tree->log_mutex);
                goto out_wake_log_root;
        }
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);

        btrfs_set_super_log_root(root->fs_info->super_for_commit,
                                 log_root_tree->node->start);
        btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
                                       btrfs_header_level(log_root_tree->node));

        log_root_tree->log_batch = 0;
        log_root_tree->log_transid++;
        smp_mb();

        mutex_unlock(&log_root_tree->log_mutex);

        /*
         * nobody else is going to jump in and write the ctree
         * super here because the log_commit atomic below is protecting
         * us.  We must be called with a transaction handle pinning
         * the running transaction open, so a full commit can't hop
         * in and cause problems either.
         */
        btrfs_scrub_pause_super(root);
        write_ctree_super(trans, root->fs_info->tree_root, 1);
        btrfs_scrub_continue_super(root);
        ret = 0;

        mutex_lock(&root->log_mutex);
        if (root->last_log_commit < log_transid)
                root->last_log_commit = log_transid;
        mutex_unlock(&root->log_mutex);

out_wake_log_root:
        atomic_set(&log_root_tree->log_commit[index2], 0);
        smp_mb();
        if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
                wake_up(&log_root_tree->log_commit_wait[index2]);
out:
        atomic_set(&root->log_commit[index1], 0);
        smp_mb();
        if (waitqueue_active(&root->log_commit_wait[index1]))
                wake_up(&root->log_commit_wait[index1]);
        return ret;
}
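
/*
 * Illustrative sketch only (not part of the original file): how a caller
 * would honor the contract described above.  On any nonzero return the
 * only safe fallback is a full transaction commit; 'trans' and 'root' are
 * assumed to come from the caller's fsync path.
 */
#if 0
        ret = btrfs_sync_log(trans, root);
        if (ret == 0)
                ret = btrfs_end_transaction(trans, root);
        else
                ret = btrfs_commit_transaction(trans, root);
#endif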
static void free_log_tree(struct btrfs_trans_handle *trans,
                          struct btrfs_root *log)
{
        int ret;
        u64 start;
        u64 end;
        struct walk_control wc = {
                .free = 1,
                .process_func = process_one_buffer
        };

        ret = walk_log_tree(trans, log, &wc);
        BUG_ON(ret);

        while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
                                0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
                if (ret)
                        break;

                clear_extent_bits(&log->dirty_log_pages, start, end,
                                  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
        }

        free_extent_buffer(log->node);
        kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
        if (root->log_root) {
                free_log_tree(trans, root->log_root);
                root->log_root = NULL;
        }
        return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        if (fs_info->log_root_tree) {
                free_log_tree(trans, fs_info->log_root_tree);
                fs_info->log_root_tree = NULL;
        }
        return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 const char *name, int name_len,
                                 struct inode *dir, u64 index)
{
        struct btrfs_root *log;
        struct btrfs_dir_item *di;
        struct btrfs_path *path;
        int ret;
        int err = 0;
        int bytes_del = 0;
        u64 dir_ino = btrfs_ino(dir);

        if (BTRFS_I(dir)->logged_trans < trans->transid)
                return 0;

        ret = join_running_log_trans(root);
        if (ret)
                return 0;

        mutex_lock(&BTRFS_I(dir)->log_mutex);

        log = root->log_root;
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                goto out_unlock;
        }

        di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
                                   name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
                goto fail;
        }
        if (di) {
                ret = btrfs_delete_one_dir_name(trans, log, path, di);
                bytes_del += name_len;
                BUG_ON(ret);
        }
        btrfs_release_path(path);
        di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
                                         index, name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
                goto fail;
        }
        if (di) {
                ret = btrfs_delete_one_dir_name(trans, log, path, di);
                bytes_del += name_len;
                BUG_ON(ret);
        }

        /* update the directory size in the log to reflect the names
         * we have removed
         */
        if (bytes_del) {
                struct btrfs_key key;

                key.objectid = dir_ino;
                key.offset = 0;
                key.type = BTRFS_INODE_ITEM_KEY;
                btrfs_release_path(path);

                ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (ret == 0) {
                        struct btrfs_inode_item *item;
                        u64 i_size;

                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_inode_item);
                        i_size = btrfs_inode_size(path->nodes[0], item);
                        if (i_size > bytes_del)
                                i_size -= bytes_del;
                        else
                                i_size = 0;
                        btrfs_set_inode_size(path->nodes[0], item, i_size);
                        btrfs_mark_buffer_dirty(path->nodes[0]);
                } else
                        ret = 0;
                btrfs_release_path(path);
        }
fail:
        btrfs_free_path(path);
out_unlock:
        mutex_unlock(&BTRFS_I(dir)->log_mutex);
        if (ret == -ENOSPC) {
                root->fs_info->last_trans_log_full_commit = trans->transid;
                ret = 0;
        } else if (ret < 0)
                btrfs_abort_transaction(trans, root, ret);

        btrfs_end_log_trans(root);

        return err;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               const char *name, int name_len,
                               struct inode *inode, u64 dirid)
{
        struct btrfs_root *log;
        u64 index;
        int ret;

        if (BTRFS_I(inode)->logged_trans < trans->transid)
                return 0;

        ret = join_running_log_trans(root);
        if (ret)
                return 0;
        log = root->log_root;
        mutex_lock(&BTRFS_I(inode)->log_mutex);

        ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
                                  dirid, &index);
        mutex_unlock(&BTRFS_I(inode)->log_mutex);
        if (ret == -ENOSPC) {
                root->fs_info->last_trans_log_full_commit = trans->transid;
                ret = 0;
        } else if (ret < 0 && ret != -ENOENT)
                btrfs_abort_transaction(trans, root, ret);
        btrfs_end_log_trans(root);

        return ret;
}
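
/*
 * Illustrative sketch only (not part of the original file): the unlink
 * path pairs the two helpers above so that both the directory entry and
 * the inode back reference disappear from the log.  'name', 'name_len',
 * 'dir', 'inode', 'index' and 'dir_ino' are assumed caller state.
 */
#if 0
        btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
        btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir_ino);
#endif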
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
                                       int key_type, u64 dirid,
                                       u64 first_offset, u64 last_offset)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_dir_log_item *item;

        key.objectid = dirid;
        key.offset = first_offset;
        if (key_type == BTRFS_DIR_ITEM_KEY)
                key.type = BTRFS_DIR_LOG_ITEM_KEY;
        else
                key.type = BTRFS_DIR_LOG_INDEX_KEY;
        ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
        if (ret)
                return ret;

        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_dir_log_item);
        btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_release_path(path);
        return 0;
}
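
/*
 * Illustrative example only (not part of the original file, values
 * hypothetical): mark the log authoritative for directory index offsets
 * 3 through 7 of directory 'dirid'.  During replay, any index in that
 * range found in the subvolume but not in the log will be deleted.
 */
#if 0
        ret = insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY,
                                 dirid, 3, 7);
#endif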
2436 * log all the items included in the current transaction for a given
2437 * directory. This also creates the range items in the log tree required
2438 * to replay anything deleted before the fsync
2440 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2441 struct btrfs_root *root, struct inode *inode,
2442 struct btrfs_path *path,
2443 struct btrfs_path *dst_path, int key_type,
2444 u64 min_offset, u64 *last_offset_ret)
2446 struct btrfs_key min_key;
2447 struct btrfs_key max_key;
2448 struct btrfs_root *log = root->log_root;
2449 struct extent_buffer *src;
2454 u64 first_offset = min_offset;
2455 u64 last_offset = (u64)-1;
2456 u64 ino = btrfs_ino(inode);
2458 log = root->log_root;
2459 max_key.objectid = ino;
2460 max_key.offset = (u64)-1;
2461 max_key.type = key_type;
2463 min_key.objectid = ino;
2464 min_key.type = key_type;
2465 min_key.offset = min_offset;
2467 path->keep_locks = 1;
2469 ret = btrfs_search_forward(root, &min_key, &max_key,
2470 path, 0, trans->transid);
2473 * we didn't find anything from this transaction, see if there
2474 * is anything at all
2476 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2477 min_key.objectid = ino;
2478 min_key.type = key_type;
2479 min_key.offset = (u64)-1;
2480 btrfs_release_path(path);
2481 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2483 btrfs_release_path(path);
2486 ret = btrfs_previous_item(root, path, ino, key_type);
2488 /* if ret == 0 there are items for this type,
2489 * create a range to tell us the last key of this type.
2490 * otherwise, there are no items in this directory after
2491 * *min_offset, and we create a range to indicate that.
2494 struct btrfs_key tmp;
2495 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2497 if (key_type == tmp.type)
2498 first_offset = max(min_offset, tmp.offset) + 1;
2503 /* go backward to find any previous key */
2504 ret = btrfs_previous_item(root, path, ino, key_type);
2506 struct btrfs_key tmp;
2507 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2508 if (key_type == tmp.type) {
2509 first_offset = tmp.offset;
2510 ret = overwrite_item(trans, log, dst_path,
2511 path->nodes[0], path->slots[0],
2519 btrfs_release_path(path);
2521 /* find the first key from this transaction again */
2522 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2529 * we have a block from this transaction, log every item in it
2530 * from our directory
2533 struct btrfs_key tmp;
2534 src = path->nodes[0];
2535 nritems = btrfs_header_nritems(src);
2536 for (i = path->slots[0]; i < nritems; i++) {
2537 btrfs_item_key_to_cpu(src, &min_key, i);
2539 if (min_key.objectid != ino || min_key.type != key_type)
2541 ret = overwrite_item(trans, log, dst_path, src, i,
2548 path->slots[0] = nritems;
2551 * look ahead to the next item and see if it is also
2552 * from this directory and from this transaction
2554 ret = btrfs_next_leaf(root, path);
2556 last_offset = (u64)-1;
2559 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2560 if (tmp.objectid != ino || tmp.type != key_type) {
2561 last_offset = (u64)-1;
2564 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
2565 ret = overwrite_item(trans, log, dst_path,
2566 path->nodes[0], path->slots[0],
2571 last_offset = tmp.offset;
2576 btrfs_release_path(path);
2577 btrfs_release_path(dst_path);
2580 *last_offset_ret = last_offset;
2582 	 * insert the log range keys to indicate where the log is valid
2585 ret = insert_dir_log_key(trans, log, path, key_type,
2586 ino, first_offset, last_offset);
2594  * logging directories is very similar to logging inodes. We find all the items
2595 * from the current transaction and write them to the log.
2597 * The recovery code scans the directory in the subvolume, and if it finds a
2598 * key in the range logged that is not present in the log tree, then it means
2599 * that dir entry was unlinked during the transaction.
2601 * In order for that scan to work, we must include one key smaller than
2602 * the smallest logged by this transaction and one key larger than the largest
2603 * key logged by this transaction.
2605 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2606 struct btrfs_root *root, struct inode *inode,
2607 struct btrfs_path *path,
2608 struct btrfs_path *dst_path)
2613 int key_type = BTRFS_DIR_ITEM_KEY;
2619 ret = log_dir_items(trans, root, inode, path,
2620 dst_path, key_type, min_key,
2624 if (max_key == (u64)-1)
2626 min_key = max_key + 1;
2629 if (key_type == BTRFS_DIR_ITEM_KEY) {
2630 key_type = BTRFS_DIR_INDEX_KEY;
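/*
 * log_directory_changes therefore makes two passes over the directory:
 * one for BTRFS_DIR_ITEM_KEY (entries hashed by name) and one for
 * BTRFS_DIR_INDEX_KEY (entries ordered by insertion index), each pass
 * chunked through log_dir_items with min_key advancing past the last
 * range logged.
 */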
2637 * a helper function to drop items from the log before we relog an
2638 * inode. max_key_type indicates the highest item type to remove.
2639 * This cannot be run for file data extents because it does not
2640 * free the extents they point to.
2642 static int drop_objectid_items(struct btrfs_trans_handle *trans,
2643 struct btrfs_root *log,
2644 struct btrfs_path *path,
2645 u64 objectid, int max_key_type)
2648 struct btrfs_key key;
2649 struct btrfs_key found_key;
2651 key.objectid = objectid;
2652 key.type = max_key_type;
2653 key.offset = (u64)-1;
2656 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2661 if (path->slots[0] == 0)
2665 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2668 if (found_key.objectid != objectid)
2671 ret = btrfs_del_item(trans, log, path);
2674 btrfs_release_path(path);
2676 btrfs_release_path(path);
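/*
 * drop_objectid_items works from the highest key downward: each pass
 * searches for (objectid, max_key_type, (u64)-1), looks at the slot just
 * before the insertion point, and deletes that item if it still belongs
 * to the objectid, repeating until nothing for the objectid remains at
 * or below max_key_type.
 */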
2682 static noinline int copy_items(struct btrfs_trans_handle *trans,
2683 struct btrfs_root *log,
2684 struct btrfs_path *dst_path,
2685 struct extent_buffer *src,
2686 int start_slot, int nr, int inode_only)
2688 unsigned long src_offset;
2689 unsigned long dst_offset;
2690 struct btrfs_file_extent_item *extent;
2691 struct btrfs_inode_item *inode_item;
2693 struct btrfs_key *ins_keys;
2697 struct list_head ordered_sums;
2699 INIT_LIST_HEAD(&ordered_sums);
2701 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
2702 nr * sizeof(u32), GFP_NOFS);
2706 ins_sizes = (u32 *)ins_data;
2707 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
2709 for (i = 0; i < nr; i++) {
2710 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
2711 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
2713 ret = btrfs_insert_empty_items(trans, log, dst_path,
2714 ins_keys, ins_sizes, nr);
2720 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
2721 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
2722 dst_path->slots[0]);
2724 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
2726 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
2727 src_offset, ins_sizes[i]);
2729 if (inode_only == LOG_INODE_EXISTS &&
2730 ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
2731 inode_item = btrfs_item_ptr(dst_path->nodes[0],
2733 struct btrfs_inode_item);
2734 btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
2736 			/* set the generation to zero so the recovery code
2737 			 * can tell the difference between a log entry made
2738 			 * just to say 'this inode exists' and one meant
2739 			 * to say 'update this inode with these values'
2741 btrfs_set_inode_generation(dst_path->nodes[0],
2744 /* take a reference on file data extents so that truncates
2745 		 * or deletes of this inode don't have to relog the inode again
2748 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
2750 extent = btrfs_item_ptr(src, start_slot + i,
2751 struct btrfs_file_extent_item);
2753 if (btrfs_file_extent_generation(src, extent) < trans->transid)
2756 found_type = btrfs_file_extent_type(src, extent);
2757 if (found_type == BTRFS_FILE_EXTENT_REG ||
2758 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
2760 ds = btrfs_file_extent_disk_bytenr(src,
2762 /* ds == 0 is a hole */
2766 dl = btrfs_file_extent_disk_num_bytes(src,
2768 cs = btrfs_file_extent_offset(src, extent);
2769 cl = btrfs_file_extent_num_bytes(src,
2771 if (btrfs_file_extent_compression(src,
2777 ret = btrfs_lookup_csums_range(
2778 log->fs_info->csum_root,
2779 ds + cs, ds + cs + cl - 1,
2786 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
2787 btrfs_release_path(dst_path);
2791 	 * we have to do this after the loop above: inserting the csums
2792 	 * modifies the log tree we are in the middle of copying into.
2795 while (!list_empty(&ordered_sums)) {
2796 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
2797 struct btrfs_ordered_sum,
2800 ret = btrfs_csum_file_blocks(trans, log, sums);
2801 list_del(&sums->list);
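/*
 * Once the ordered_sums list is drained, every REG/PREALLOC extent we
 * copied has its data checksums in the log too, so replay can rebuild
 * the csum tree entries for the data the logged extents reference.
 */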
2807 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
2809 struct extent_map *em1, *em2;
2811 em1 = list_entry(a, struct extent_map, list);
2812 em2 = list_entry(b, struct extent_map, list);
2814 if (em1->start < em2->start)
2816 else if (em1->start > em2->start)
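/*
 * extent_cmp gives list_sort a total order on extent maps by logical
 * start, so the modified_extents list is processed in ascending file
 * offset order; e.g. an EM covering [0, 4096) sorts before one covering
 * [4096, 8192), and equal starts compare as equal.
 */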
2822 struct extent_buffer *src;
2828 static int log_one_extent(struct btrfs_trans_handle *trans,
2829 struct inode *inode, struct btrfs_root *root,
2830 struct extent_map *em, struct btrfs_path *path,
2831 struct btrfs_path *dst_path, struct log_args *args)
2833 struct btrfs_root *log = root->log_root;
2834 struct btrfs_file_extent_item *fi;
2835 struct btrfs_key key;
2836 u64 start = em->mod_start;
2837 u64 len = em->mod_len;
2842 if (BTRFS_I(inode)->logged_trans == trans->transid) {
2844 ret = __btrfs_drop_extents(trans, log, inode, dst_path, start,
2845 start + len, &tmp, 0);
2853 key.objectid = btrfs_ino(inode);
2854 key.type = BTRFS_EXTENT_DATA_KEY;
2857 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2862 		 * This shouldn't happen, but it might, so warn and return an error.
2868 args->src = path->nodes[0];
2870 fi = btrfs_item_ptr(args->src, path->slots[0],
2871 struct btrfs_file_extent_item);
2873 args->start_slot + args->nr == path->slots[0]) {
2875 } else if (args->nr) {
2876 ret = copy_items(trans, log, dst_path, args->src,
2877 args->start_slot, args->nr,
2882 args->start_slot = path->slots[0];
2883 } else if (!args->nr) {
2885 args->start_slot = path->slots[0];
2887 nritems = btrfs_header_nritems(path->nodes[0]);
2889 num_bytes = btrfs_file_extent_num_bytes(args->src, fi);
2890 if (len < num_bytes) {
2891 		/* I _think_ this is ok: imagine we write to a
2892 * preallocated space that is adjacent to a previously
2893 * written preallocated space that gets merged when we
2894 * mark this preallocated space written. If we do not
2895 * have the adjacent extent in cache then when we copy
2896 * this extent it could end up being larger than our EM
2897 * thinks it is, which is a-ok, so just set len to 0.
2903 start += btrfs_file_extent_num_bytes(args->src, fi);
2904 args->next_offset = start;
2906 if (path->slots[0] < nritems) {
2913 ret = copy_items(trans, log, dst_path, args->src,
2914 args->start_slot, args->nr,
2919 btrfs_release_path(path);
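/*
 * log_one_extent batches its work through struct log_args: while each
 * new extent item sits in the same source leaf, in the slot right after
 * the previous run, we only grow args->nr and defer the copy; the
 * accumulated run is flushed with a single copy_items call when it ends
 * (or when the caller sees a non-contiguous mod_start).
 */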
2926 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
2927 struct btrfs_root *root,
2928 struct inode *inode,
2929 struct btrfs_path *path,
2930 struct btrfs_path *dst_path)
2932 struct log_args args;
2933 struct btrfs_root *log = root->log_root;
2934 struct extent_map *em, *n;
2935 struct list_head extents;
2936 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
2940 INIT_LIST_HEAD(&extents);
2942 memset(&args, 0, sizeof(args));
2944 write_lock(&tree->lock);
2945 test_gen = root->fs_info->last_trans_committed;
2947 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
2948 list_del_init(&em->list);
2949 if (em->generation <= test_gen)
2951 list_add_tail(&em->list, &extents);
2954 list_sort(NULL, &extents, extent_cmp);
2956 while (!list_empty(&extents)) {
2957 em = list_entry(extents.next, struct extent_map, list);
2959 list_del_init(&em->list);
2962 		 * If we had an error we just need to delete everybody from our private list.
2969 * If the previous EM and the last extent we left off on aren't
2970 		 * sequential then we need to copy the items we have and redo our search.
2973 if (args.nr && em->mod_start != args.next_offset) {
2974 ret = copy_items(trans, log, dst_path, args.src,
2975 args.start_slot, args.nr,
2979 btrfs_release_path(path);
2983 ret = log_one_extent(trans, inode, root, em, path, dst_path, &args);
2986 if (!ret && args.nr)
2987 ret = copy_items(trans, log, dst_path, args.src,
2988 args.start_slot, args.nr, LOG_INODE_ALL);
2989 btrfs_release_path(path);
2990 WARN_ON(!list_empty(&extents));
2991 write_unlock(&tree->lock);
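/*
 * Note that btrfs_log_changed_extents drains modified_extents under the
 * tree lock first: extent maps from already-committed transactions
 * (generation <= last_trans_committed) are simply dropped, since the
 * committed subvolume tree already describes them, and only the rest
 * are sorted and logged.
 */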
2995 /* log a single inode in the tree log.
2996 * At least one parent directory for this inode must exist in the tree
2997 * or be logged already.
2999 * Any items from this inode changed by the current transaction are copied
3000 * to the log tree. An extra reference is taken on any extents in this
3001 * file, allowing us to avoid a whole pile of corner cases around logging
3002 * blocks that have been removed from the tree.
3004  * See LOG_INODE_ALL and related defines for a description of what inode_only does.
3007 * This handles both files and directories.
3009 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3010 struct btrfs_root *root, struct inode *inode,
3013 struct btrfs_path *path;
3014 struct btrfs_path *dst_path;
3015 struct btrfs_key min_key;
3016 struct btrfs_key max_key;
3017 struct btrfs_root *log = root->log_root;
3018 struct extent_buffer *src = NULL;
3022 int ins_start_slot = 0;
3024 bool fast_search = false;
3025 u64 ino = btrfs_ino(inode);
3027 log = root->log_root;
3029 path = btrfs_alloc_path();
3032 dst_path = btrfs_alloc_path();
3034 btrfs_free_path(path);
3038 min_key.objectid = ino;
3039 min_key.type = BTRFS_INODE_ITEM_KEY;
3042 max_key.objectid = ino;
3045 /* today the code can only do partial logging of directories */
3046 if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
3047 max_key.type = BTRFS_XATTR_ITEM_KEY;
3049 max_key.type = (u8)-1;
3050 max_key.offset = (u64)-1;
3052 ret = btrfs_commit_inode_delayed_items(trans, inode);
3054 btrfs_free_path(path);
3055 btrfs_free_path(dst_path);
3059 mutex_lock(&BTRFS_I(inode)->log_mutex);
3062 * a brute force approach to making sure we get the most uptodate
3063 * copies of everything.
3065 if (S_ISDIR(inode->i_mode)) {
3066 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3068 if (inode_only == LOG_INODE_EXISTS)
3069 max_key_type = BTRFS_XATTR_ITEM_KEY;
3070 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3072 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3073 &BTRFS_I(inode)->runtime_flags)) {
3074 ret = btrfs_truncate_inode_items(trans, log,
3078 max_key.type = BTRFS_XATTR_ITEM_KEY;
3079 ret = drop_objectid_items(trans, log, path, ino,
3080 BTRFS_XATTR_ITEM_KEY);
3087 path->keep_locks = 1;
3091 ret = btrfs_search_forward(root, &min_key, &max_key,
3092 path, 0, trans->transid);
3096 /* note, ins_nr might be > 0 here, cleanup outside the loop */
3097 if (min_key.objectid != ino)
3099 if (min_key.type > max_key.type)
3102 src = path->nodes[0];
3103 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
3106 } else if (!ins_nr) {
3107 ins_start_slot = path->slots[0];
3112 ret = copy_items(trans, log, dst_path, src, ins_start_slot,
3113 ins_nr, inode_only);
3119 ins_start_slot = path->slots[0];
3122 nritems = btrfs_header_nritems(path->nodes[0]);
3124 if (path->slots[0] < nritems) {
3125 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
3130 ret = copy_items(trans, log, dst_path, src,
3132 ins_nr, inode_only);
3139 btrfs_release_path(path);
3141 if (min_key.offset < (u64)-1)
3143 else if (min_key.type < (u8)-1)
3145 else if (min_key.objectid < (u64)-1)
3151 ret = copy_items(trans, log, dst_path, src,
3153 ins_nr, inode_only);
3162 btrfs_release_path(path);
3163 btrfs_release_path(dst_path);
3164 ret = btrfs_log_changed_extents(trans, root, inode, path,
3171 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3172 struct extent_map *em, *n;
3174 list_for_each_entry_safe(em, n, &tree->modified_extents, list)
3175 list_del_init(&em->list);
3178 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
3179 btrfs_release_path(path);
3180 btrfs_release_path(dst_path);
3181 ret = log_directory_changes(trans, root, inode, path, dst_path);
3187 BTRFS_I(inode)->logged_trans = trans->transid;
3189 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3191 btrfs_free_path(path);
3192 btrfs_free_path(dst_path);
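/*
 * Putting it together, a btrfs_log_inode pass is roughly: drop (or
 * truncate) the stale copy of this inode in the log, copy every item of
 * this inode touched by the current transaction while walking the
 * subvolume forward, log the changed extents via the fast path when
 * possible, and for directories finish with the dir item/index pass.
 */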
3197 * follow the dentry parent pointers up the chain and see if any
3198 * of the directories in it require a full commit before they can
3199 * be logged. Returns zero if nothing special needs to be done or 1 if
3200 * a full commit is required.
3202 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
3203 struct inode *inode,
3204 struct dentry *parent,
3205 struct super_block *sb,
3209 struct btrfs_root *root;
3210 struct dentry *old_parent = NULL;
3213 	 * for a regular file, if its inode is already on disk, we don't
3214 * have to worry about the parents at all. This is because
3215 * we can use the last_unlink_trans field to record renames
3216 * and other fun in this file.
3218 if (S_ISREG(inode->i_mode) &&
3219 BTRFS_I(inode)->generation <= last_committed &&
3220 BTRFS_I(inode)->last_unlink_trans <= last_committed)
3223 if (!S_ISDIR(inode->i_mode)) {
3224 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3226 inode = parent->d_inode;
3230 BTRFS_I(inode)->logged_trans = trans->transid;
3233 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
3234 root = BTRFS_I(inode)->root;
3237 * make sure any commits to the log are forced
3238 * to be full commits
3240 root->fs_info->last_trans_log_full_commit =
3246 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3249 if (IS_ROOT(parent))
3252 parent = dget_parent(parent);
3254 old_parent = parent;
3255 inode = parent->d_inode;
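/*
 * Example of the directory case this guards against (rough sketch):
 *
 *   mkdir foo; mkdir other
 *   touch foo/bar
 *   mv foo/bar other/bar      (the rename records foo's last_unlink_trans)
 *   touch foo/baz
 *   fsync foo/baz
 *
 * walking up from baz reaches foo, whose last_unlink_trans is from this
 * transaction, so the fsync falls back to a full transaction commit
 * rather than risk replaying foo with a stale entry.
 */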
3264 * helper function around btrfs_log_inode to make sure newly created
3265  * parent directories also end up in the log. Minimal, inode-and-backref
3266  * only logging is done for any parent directories that are newer than
3267  * the last committed transaction
3269 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3270 struct btrfs_root *root, struct inode *inode,
3271 struct dentry *parent, int exists_only)
3273 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
3274 struct super_block *sb;
3275 struct dentry *old_parent = NULL;
3277 u64 last_committed = root->fs_info->last_trans_committed;
3281 if (btrfs_test_opt(root, NOTREELOG)) {
3286 if (root->fs_info->last_trans_log_full_commit >
3287 root->fs_info->last_trans_committed) {
3292 if (root != BTRFS_I(inode)->root ||
3293 btrfs_root_refs(&root->root_item) == 0) {
3298 ret = check_parent_dirs_for_sync(trans, inode, parent,
3299 sb, last_committed);
3303 if (btrfs_inode_in_log(inode, trans->transid)) {
3304 ret = BTRFS_NO_LOG_SYNC;
3308 ret = start_log_trans(trans, root);
3312 ret = btrfs_log_inode(trans, root, inode, inode_only);
3317 	 * for a regular file, if its inode is already on disk, we don't
3318 * have to worry about the parents at all. This is because
3319 * we can use the last_unlink_trans field to record renames
3320 * and other fun in this file.
3322 if (S_ISREG(inode->i_mode) &&
3323 BTRFS_I(inode)->generation <= last_committed &&
3324 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
3329 inode_only = LOG_INODE_EXISTS;
3331 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3334 inode = parent->d_inode;
3335 if (root != BTRFS_I(inode)->root)
3338 if (BTRFS_I(inode)->generation >
3339 root->fs_info->last_trans_committed) {
3340 ret = btrfs_log_inode(trans, root, inode, inode_only);
3344 if (IS_ROOT(parent))
3347 parent = dget_parent(parent);
3349 old_parent = parent;
3355 WARN_ON(ret != -ENOSPC);
3356 root->fs_info->last_trans_log_full_commit = trans->transid;
3359 btrfs_end_log_trans(root);
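/*
 * Return convention for btrfs_log_inode_parent: 0 means the inode and
 * any new parents are in the log and a log commit suffices; 1 means the
 * caller must do a full transaction commit; BTRFS_NO_LOG_SYNC means the
 * inode was already logged in this transaction and no log sync is
 * needed at all.
 */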
3365  * it is not safe to log a dentry if the chunk root has added new
3366  * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
3367  * If this returns 1, you must commit the transaction to safely get your data on disk.
3370 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
3371 struct btrfs_root *root, struct dentry *dentry)
3373 struct dentry *parent = dget_parent(dentry);
3376 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
3383  * should be called during mount to replay any log trees from the FS
3386 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3389 struct btrfs_path *path;
3390 struct btrfs_trans_handle *trans;
3391 struct btrfs_key key;
3392 struct btrfs_key found_key;
3393 struct btrfs_key tmp_key;
3394 struct btrfs_root *log;
3395 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
3396 struct walk_control wc = {
3397 .process_func = process_one_buffer,
3401 path = btrfs_alloc_path();
3405 fs_info->log_root_recovering = 1;
3407 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3408 if (IS_ERR(trans)) {
3409 ret = PTR_ERR(trans);
3416 ret = walk_log_tree(trans, log_root_tree, &wc);
3418 btrfs_error(fs_info, ret, "Failed to pin buffers while "
3419 "recovering log root tree.");
3424 key.objectid = BTRFS_TREE_LOG_OBJECTID;
3425 key.offset = (u64)-1;
3426 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
3429 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
3432 btrfs_error(fs_info, ret,
3433 "Couldn't find tree log root.");
3437 if (path->slots[0] == 0)
3441 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3443 btrfs_release_path(path);
3444 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3447 log = btrfs_read_fs_root_no_radix(log_root_tree,
3451 btrfs_error(fs_info, ret,
3452 "Couldn't read tree log root.");
3456 tmp_key.objectid = found_key.offset;
3457 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
3458 tmp_key.offset = (u64)-1;
3460 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3461 if (IS_ERR(wc.replay_dest)) {
3462 ret = PTR_ERR(wc.replay_dest);
3463 btrfs_error(fs_info, ret, "Couldn't read target root "
3464 "for tree log recovery.");
3468 wc.replay_dest->log_root = log;
3469 btrfs_record_root_in_trans(trans, wc.replay_dest);
3470 ret = walk_log_tree(trans, log, &wc);
3473 if (wc.stage == LOG_WALK_REPLAY_ALL) {
3474 ret = fixup_inode_link_counts(trans, wc.replay_dest,
3479 key.offset = found_key.offset - 1;
3480 wc.replay_dest->log_root = NULL;
3481 free_extent_buffer(log->node);
3482 free_extent_buffer(log->commit_root);
3485 if (found_key.offset == 0)
3488 btrfs_release_path(path);
3490 /* step one is to pin it all, step two is to replay just inodes */
3493 wc.process_func = replay_one_buffer;
3494 wc.stage = LOG_WALK_REPLAY_INODES;
3497 /* step three is to replay everything */
3498 if (wc.stage < LOG_WALK_REPLAY_ALL) {
3503 btrfs_free_path(path);
3505 free_extent_buffer(log_root_tree->node);
3506 log_root_tree->log_root = NULL;
3507 fs_info->log_root_recovering = 0;
3509 	/* step four: commit the transaction, which also unpins the blocks */
3510 btrfs_commit_transaction(trans, fs_info->tree_root);
3512 kfree(log_root_tree);
3516 btrfs_free_path(path);
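/*
 * Note how the recovery loop above finds its work: log tree roots live
 * in the log root tree under BTRFS_TREE_LOG_OBJECTID with the target
 * subvolume id in key.offset, so the scan starts at offset (u64)-1 and
 * walks backwards, replaying each log into the subvolume it names; the
 * whole scan runs once per walk_control stage before the final commit
 * unpins the blocks.
 */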
3521 * there are some corner cases where we want to force a full
3522 * commit instead of allowing a directory to be logged.
3524  * They revolve around files that were unlinked from the directory, and
3525 * this function updates the parent directory so that a full commit is
3526 * properly done if it is fsync'd later after the unlinks are done.
3528 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
3529 struct inode *dir, struct inode *inode,
3533 * when we're logging a file, if it hasn't been renamed
3534 * or unlinked, and its inode is fully committed on disk,
3535 * we don't have to worry about walking up the directory chain
3536 * to log its parents.
3538 * So, we use the last_unlink_trans field to put this transid
3539 * into the file. When the file is logged we check it and
3540 * don't log the parents if the file is fully on disk.
3542 if (S_ISREG(inode->i_mode))
3543 BTRFS_I(inode)->last_unlink_trans = trans->transid;
3546 	 * if this directory was already logged, any new
3547 * names for this file/dir will get recorded
3550 if (BTRFS_I(dir)->logged_trans == trans->transid)
3554 * if the inode we're about to unlink was logged,
3555 * the log will be properly updated for any new names
3557 if (BTRFS_I(inode)->logged_trans == trans->transid)
3561 	 * when renaming files across directories, if the directory
3562 	 * we're unlinking from gets fsync'd later on, there's
3563 * no way to find the destination directory later and fsync it
3564 * properly. So, we have to be conservative and force commits
3565 * so the new name gets discovered.
3570 /* we can safely do the unlink without any special recording */
3574 BTRFS_I(dir)->last_unlink_trans = trans->transid;
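/*
 * For example (rough sketch):
 *
 *   fsync foo            (foo's logged_trans becomes this transid)
 *   rm foo/bar
 *
 * hits the first early return above: the already-logged directory will
 * see the deletion through the normal dir entry logging, so no
 * conservative full-commit marker is needed.
 */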
3578 * Call this after adding a new name for a file and it will properly
3579 * update the log to reflect the new name.
3581 * It will return zero if all goes well, and it will return 1 if a
3582 * full transaction commit is required.
3584 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
3585 struct inode *inode, struct inode *old_dir,
3586 struct dentry *parent)
3588 struct btrfs_root * root = BTRFS_I(inode)->root;
3591 	 * this will force the logging code to walk the dentry chain up to the root
3594 if (S_ISREG(inode->i_mode))
3595 BTRFS_I(inode)->last_unlink_trans = trans->transid;
3598 	 * if this inode hasn't been logged and the directory we're renaming it
3599 * from hasn't been logged, we don't need to log it
3601 if (BTRFS_I(inode)->logged_trans <=
3602 root->fs_info->last_trans_committed &&
3603 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
3604 root->fs_info->last_trans_committed))
3607 return btrfs_log_inode_parent(trans, root, inode, parent, 1);
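/*
 * A typical caller is the rename or link path, once the new name is in
 * place (sketch only, arguments abbreviated):
 *
 *   ret = btrfs_log_new_name(trans, inode, old_dir, parent);
 *   if (ret)
 *           ret = btrfs_commit_transaction(trans, root);
 *
 * The final '1' passed to btrfs_log_inode_parent above selects
 * exists_only logging, writing just enough of each inode to recreate
 * the chain of names at replay time.
 */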