/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK         400
#define BTRFS_DELAYED_BACKGROUND        100
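
/*
 * Note on the thresholds above (added descriptive comment):
 * btrfs_balance_delayed_items() starts flushing delayed nodes in the
 * background once the number of delayed items reaches
 * BTRFS_DELAYED_BACKGROUND, and once it reaches BTRFS_DELAYED_WRITEBACK
 * it flushes everything and makes the caller wait (for up to a second)
 * until the count drops back below the background level.
 */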

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void btrfs_delayed_inode_exit(void)
{
        if (delayed_node_cache)
                kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        atomic_set(&delayed_node->refs, 0);
        delayed_node->count = 0;
        delayed_node->in_list = 0;
        delayed_node->inode_dirty = 0;
        delayed_node->ins_root = RB_ROOT;
        delayed_node->del_root = RB_ROOT;
        mutex_init(&delayed_node->mutex);
        delayed_node->index_cnt = 0;
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
        delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
                                                        struct btrfs_root *root)
{
        return root->fs_info->delayed_root;
}

static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(inode);
        int ret;

again:
        node = ACCESS_ONCE(btrfs_inode->delayed_node);
        if (node) {
                atomic_inc(&node->refs);        /* can be accessed */
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
                        spin_unlock(&root->inode_lock);
                        goto again;
                }
                btrfs_inode->delayed_node = node;
                atomic_inc(&node->refs);        /* can be accessed */
                atomic_inc(&node->refs);        /* cached in the inode */
                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        atomic_inc(&node->refs);        /* cached in the btrfs inode */
        atomic_inc(&node->refs);        /* can be accessed */

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                kmem_cache_free(delayed_node_cache, node);
                spin_unlock(&root->inode_lock);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}
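
/*
 * Example (illustrative sketch only, not called anywhere): a successful
 * btrfs_get_or_create_delayed_node() leaves one reference cached in the
 * btrfs inode and hands the caller its own "can be accessed" reference,
 * which must be dropped with btrfs_release_delayed_node() when done:
 *
 *	struct btrfs_delayed_node *dn;
 *
 *	dn = btrfs_get_or_create_delayed_node(dir);
 *	if (IS_ERR(dn))
 *		return PTR_ERR(dn);
 *	... use dn, taking dn->mutex around item changes ...
 *	btrfs_release_delayed_node(dn);
 */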

/*
 * Call this with delayed_node->mutex held.
 *
 * If mod = 1, also add this node to the prepare list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                atomic_inc(&node->refs);        /* inserted into list */
                root->nodes++;
                node->in_list = 1;
        }
        spin_unlock(&root->lock);
}

/* Call this with delayed_node->mutex held. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                root->nodes--;
                atomic_dec(&node->refs);        /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                node->in_list = 0;
        }
        spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!node->in_list) {   /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}
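
/*
 * Example (illustrative sketch only): btrfs_first_delayed_node() and
 * btrfs_next_delayed_node() form a reference-counted iteration idiom;
 * each node is released after stepping past it, exactly as
 * btrfs_run_delayed_items() does below:
 *
 *	curr = btrfs_first_delayed_node(delayed_root);
 *	while (curr) {
 *		... process curr ...
 *		prev = curr;
 *		curr = btrfs_next_delayed_node(curr);
 *		btrfs_release_delayed_node(prev);
 *	}
 */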

static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

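        /*
         * Added note: when the last reference is dropped, recheck the
         * refcount under root->inode_lock before freeing, because
         * btrfs_get_or_create_delayed_node() may have just found this
         * node in the radix tree and taken a new reference on it.
         */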
        if (atomic_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;
                spin_lock(&root->inode_lock);
                if (atomic_read(&delayed_node->refs) == 0) {
                        radix_tree_delete(&root->delayed_nodes_tree,
                                          delayed_node->inode_id);
                        kmem_cache_free(delayed_node_cache, delayed_node);
                }
                spin_unlock(&root->inode_lock);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;
        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
                atomic_set(&item->refs, 1);
        }
        return item;
}
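
/*
 * Example (illustrative sketch only): a delayed item's payload lives
 * inline after the struct, so callers size the allocation for the
 * on-disk item they are buffering and then fill item->data, as
 * btrfs_insert_delayed_dir_index() does below:
 *
 *	item = btrfs_alloc_delayed_item(sizeof(struct btrfs_dir_item) +
 *					name_len);
 *	if (!item)
 *		return -ENOMEM;
 *	dir_item = (struct btrfs_dir_item *)item->data;
 *	... fill *dir_item and copy the name in after it ...
 */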

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:         the rb-tree root to search (ins_root or del_root)
 * @key:          the key to look up
 * @prev:         used to store the prev item if the right item isn't found
 * @next:         used to store the next item if the right item isn't found
 *
 * Note: if the right item isn't found, NULL is returned and the prev and
 * next items are stored via @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}
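
/*
 * Example (illustrative sketch only): the @prev/@next out-parameters
 * report the neighbours of a missing key, which is what the *_search_*
 * wrappers below rely on to find "the first item at or after @key":
 *
 *	item = __btrfs_lookup_delayed_item(&node->ins_root, key,
 *					   NULL, &next);
 *	if (!item)
 *		item = next;	(the first item whose key is > *key)
 */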

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root *root;
        struct btrfs_delayed_item *item;
        int cmp;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0)
                        p = &(*p)->rb_right;
                else if (cmp > 0)
                        p = &(*p)->rb_left;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}
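
/*
 * Added note: index_cnt tracks the next free dir index offset past
 * everything buffered in the insertion tree; e.g. after queueing
 * insertions with offsets 14 and 15, index_cnt becomes 16.
 * btrfs_inode_delayed_dir_index_count() below hands this value back
 * to the btrfs inode.
 */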

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root *root;
        struct btrfs_delayed_root *delayed_root;

        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;
        atomic_dec(&delayed_root->items);
        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
            waitqueue_active(&delayed_root->wait))
                wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (atomic_dec_and_test(&item->refs))
                        kfree(item);
        }
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_inode->delayed_node;
        if (delayed_node)
                atomic_inc(&delayed_node->refs);

        return delayed_node;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                   u64 root_id)
{
        struct btrfs_key root_key;

        if (root->objectid == root_id)
                return root;

        root_key.objectid = root_id;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret)
                item->bytes_reserved = num_bytes;

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;

        if (!item->bytes_reserved)
                return;

        rsv = &root->fs_info->global_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                item->bytes_reserved);
}
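
/*
 * Added note: the reserve/release helpers here and below come in pairs.
 * Reserving migrates one metadata unit from the transaction's block
 * reservation into the global reservation while the change is buffered;
 * releasing gives it back once the buffered change has been written into
 * the tree (see btrfs_batch_insert_items() and
 * btrfs_update_delayed_inode()).
 */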

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret)
                node->bytes_reserved = num_bytes;

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &root->fs_info->global_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}
652 /*
653  * This helper will insert some continuous items into the same leaf according
654  * to the free space of the leaf.
655  */
656 static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
657                                 struct btrfs_root *root,
658                                 struct btrfs_path *path,
659                                 struct btrfs_delayed_item *item)
660 {
661         struct btrfs_delayed_item *curr, *next;
662         int free_space;
663         int total_data_size = 0, total_size = 0;
664         struct extent_buffer *leaf;
665         char *data_ptr;
666         struct btrfs_key *keys;
667         u32 *data_size;
668         struct list_head head;
669         int slot;
670         int nitems;
671         int i;
672         int ret = 0;
673
674         BUG_ON(!path->nodes[0]);
675
676         leaf = path->nodes[0];
677         free_space = btrfs_leaf_free_space(root, leaf);
678         INIT_LIST_HEAD(&head);
679
680         next = item;
681         nitems = 0;
682
683         /*
684          * count the number of the continuous items that we can insert in batch
685          */
686         while (total_size + next->data_len + sizeof(struct btrfs_item) <=
687                free_space) {
688                 total_data_size += next->data_len;
689                 total_size += next->data_len + sizeof(struct btrfs_item);
690                 list_add_tail(&next->tree_list, &head);
691                 nitems++;
692
693                 curr = next;
694                 next = __btrfs_next_delayed_item(curr);
695                 if (!next)
696                         break;
697
698                 if (!btrfs_is_continuous_delayed_item(curr, next))
699                         break;
700         }
701
702         if (!nitems) {
703                 ret = 0;
704                 goto out;
705         }
706
707         /*
708          * we need allocate some memory space, but it might cause the task
709          * to sleep, so we set all locked nodes in the path to blocking locks
710          * first.
711          */
712         btrfs_set_path_blocking(path);
713
714         keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
715         if (!keys) {
716                 ret = -ENOMEM;
717                 goto out;
718         }
719
720         data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
721         if (!data_size) {
722                 ret = -ENOMEM;
723                 goto error;
724         }
725
726         /* get keys of all the delayed items */
727         i = 0;
728         list_for_each_entry(next, &head, tree_list) {
729                 keys[i] = next->key;
730                 data_size[i] = next->data_len;
731                 i++;
732         }
733
734         /* reset all the locked nodes in the patch to spinning locks. */
735         btrfs_clear_path_blocking(path, NULL);
736
737         /* insert the keys of the items */
738         ret = setup_items_for_insert(trans, root, path, keys, data_size,
739                                      total_data_size, total_size, nitems);
740         if (ret)
741                 goto error;
742
743         /* insert the dir index items */
744         slot = path->slots[0];
745         list_for_each_entry_safe(curr, next, &head, tree_list) {
746                 data_ptr = btrfs_item_ptr(leaf, slot, char);
747                 write_extent_buffer(leaf, &curr->data,
748                                     (unsigned long)data_ptr,
749                                     curr->data_len);
750                 slot++;
751
752                 btrfs_delayed_item_release_metadata(root, curr);
753
754                 list_del(&curr->tree_list);
755                 btrfs_release_delayed_item(curr);
756         }
757
758 error:
759         kfree(data_size);
760         kfree(keys);
761 out:
762         return ret;
763 }
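
/*
 * Worked example (added note): each batched item consumes its payload
 * plus one item header, i.e. data_len + sizeof(struct btrfs_item), which
 * is 25 bytes on disk.  So with, say, 3000 bytes of leaf free space and
 * dir index items carrying 40 bytes of data each, the loop above could
 * batch up to 3000 / (40 + 25) = 46 continuous items.
 */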

/*
 * This helper handles simple insertions that don't need to extend an
 * existing item with new data, such as directory name index insertion
 * and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        struct btrfs_item *item;
        char *ptr;
        int ret;

        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        item = btrfs_item_nr(leaf, path->slots[0]);
        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}

/*
 * We insert one item first; then, if continuous items follow, we try to
 * insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(trans, root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT; /* FIXME: is this errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * count how many dir index items we can delete in one batch
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * can't find the item this node points to, so the node
                 * is stale; just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr)
                        goto do_again;
                else
                        goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node && delayed_node->inode_dirty) {
                BUG_ON(!delayed_node->root);
                delayed_node->inode_dirty = 0;
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                atomic_dec(&delayed_root->items);
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND &&
                    waitqueue_active(&delayed_root->wait))
                        wake_up(&delayed_root->wait);
        }
}

static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int ret;

        mutex_lock(&node->mutex);
        if (!node->inode_dirty) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        key.objectid = node->inode_id;
        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
        key.offset = 0;
        ret = btrfs_lookup_inode(trans, root, path, &key, 1);
        if (ret > 0) {
                btrfs_release_path(path);
                mutex_unlock(&node->mutex);
                return -ENOENT;
        } else if (ret < 0) {
                mutex_unlock(&node->mutex);
                return ret;
        }

        btrfs_unlock_up_safe(path, 1);
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        btrfs_delayed_inode_release_metadata(root, node);
        btrfs_release_delayed_inode(node);
        mutex_unlock(&node->mutex);

        return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        delayed_root = btrfs_get_delayed_root(root);

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node) {
                root = curr_node->root;
                ret = btrfs_insert_delayed_items(trans, path, root,
                                                 curr_node);
                if (!ret)
                        ret = btrfs_delete_delayed_items(trans, path, root,
                                                         curr_node);
                if (!ret)
                        ret = btrfs_update_delayed_inode(trans, root, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
        return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                              struct btrfs_delayed_node *node)
{
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &node->root->fs_info->global_block_rsv;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        btrfs_free_path(path);

        trans->block_rsv = block_rsv;
        return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
        if (!delayed_node)
                return;

        BTRFS_I(inode)->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
        struct btrfs_root *root;
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_work work;
};

static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        unsigned long nr = 0;
        int need_requeue = 0;
        int ret;

        async_node = container_of(work, struct btrfs_async_delayed_node, work);

        path = btrfs_alloc_path();
        if (!path)
                goto out;
        path->leave_spinning = 1;

        delayed_node = async_node->delayed_node;
        root = delayed_node->root;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto free_path;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, root,
                                                 delayed_node);

        if (!ret)
                btrfs_update_delayed_inode(trans, root, path, delayed_node);

        /*
         * New delayed items may have been inserted, so we need to requeue
         * the work.  Besides that, we must dequeue empty delayed nodes to
         * avoid a race between delayed-item balancing and the worker.
         * The race looks like this:
         *      Task1                           Worker thread
         *                                      count == 0, needn't requeue
         *                                        also needn't insert the
         *                                        delayed node into prepare
         *                                        list again.
         *      add lots of delayed items
         *      queue the delayed node
         *        already in the list,
         *        and not in the prepare
         *        list, it means the delayed
         *        node is being dealt with
         *        by the worker.
         *      do delayed items balance
         *        the delayed node is being
         *        dealt with by the worker
         *        now, just wait.
         *                                      the worker goes idle.
         * Task1 will sleep until the transaction is committed.
         */
        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                need_requeue = 1;
        else
                btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
                                           delayed_node);
        mutex_unlock(&delayed_node->mutex);

        nr = trans->blocks_used;

        trans->block_rsv = block_rsv;
        btrfs_end_transaction_dmeta(trans, root);
        __btrfs_btree_balance_dirty(root, nr);
free_path:
        btrfs_free_path(path);
out:
        if (need_requeue)
                btrfs_requeue_work(&async_node->work);
        else {
                btrfs_release_prepared_delayed_node(delayed_node);
                kfree(async_node);
        }
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_root *root, int all)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_delayed_node *curr;
        int count = 0;

again:
        curr = btrfs_first_prepared_delayed_node(delayed_root);
        if (!curr)
                return 0;

        async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
        if (!async_node) {
                btrfs_release_prepared_delayed_node(curr);
                return -ENOMEM;
        }

        async_node->root = root;
        async_node->delayed_node = curr;

        async_node->work.func = btrfs_async_run_delayed_node_done;
        async_node->work.flags = 0;

        btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
        count++;

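        /*
         * Added note: when @all is not set, dispatch at most 4 prepared
         * nodes per call; with @all set, keep going until the prepare
         * list is drained.
         */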
        if (all || count < 4)
                goto again;

        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;
        delayed_root = btrfs_get_delayed_root(root);
        WARN_ON(btrfs_first_delayed_node(delayed_root));
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;

        delayed_root = btrfs_get_delayed_root(root);

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int ret;
                ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
                if (ret)
                        return;

                wait_event_interruptible_timeout(
                                delayed_root->wait,
                                (atomic_read(&delayed_root->items) <
                                 BTRFS_DELAYED_BACKGROUND),
                                HZ);
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, const char *name,
                                   int name_len, struct inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
        /*
         * we reserved enough space when we started the transaction, so a
         * metadata reservation failure here is impossible
         */
        BUG_ON(ret);

        delayed_item->key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        dir_item->transid = cpu_to_le64(trans->transid);
        dir_item->data_len = 0;
        dir_item->name_len = cpu_to_le16(name_len);
        dir_item->type = type;
        memcpy((char *)(dir_item + 1), name, name_len);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                printk(KERN_ERR "error adding delayed dir index item "
                                "(name: %s) into the insertion tree of the "
                                "delayed node (root id: %llu, inode id: %llu, "
                                "errno: %d)\n",
                                name,
                                (unsigned long long)delayed_node->root->objectid,
                                (unsigned long long)delayed_node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, struct inode *dir,
                                   u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        struct btrfs_key item_key;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        item_key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
        item_key.offset = index;

        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->key = item_key;

        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
        /*
         * we reserved enough space when we started the transaction, so a
         * metadata reservation failure here is impossible.
         */
        BUG_ON(ret);

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_deletion_item(node, item);
        if (unlikely(ret)) {
                printk(KERN_ERR "error adding delayed dir index item "
                                "(index: %llu) into the deletion tree of the "
                                "delayed node (root id: %llu, inode id: %llu, "
                                "errno: %d)\n",
                                (unsigned long long)index,
                                (unsigned long long)node->root->objectid,
                                (unsigned long long)node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
        int ret = 0;

        if (!delayed_node)
                return -ENOENT;

        /*
         * We hold the i_mutex of this directory, so no new directory index
         * can be added to the delayed node and index_cnt cannot be updated
         * under us.  Thus we don't need to lock the delayed node.
         */
        if (!delayed_node->index_cnt)
                return -EINVAL;

        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
        return ret;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so its
         * refcount must be > 1 here, and we needn't check whether it is
         * about to be freed.
         *
         * Besides that, this function is used for reading a directory,
         * and no delayed items are inserted or deleted during that period,
         * so we also needn't requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr, *next;
        int ret;

        if (list_empty(del_list))
                return 0;

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;

                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (ret)
                        return 1;
                else
                        continue;
        }
        return 0;
}
1506
1507 /*
1508  * btrfs_readdir_delayed_dir_index - read the directory entries stored
1509  * in the delayed insertion tree and feed them to filldir
1510  */
1511 int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
1512                                     filldir_t filldir,
1513                                     struct list_head *ins_list)
1514 {
1515         struct btrfs_dir_item *di;
1516         struct btrfs_delayed_item *curr, *next;
1517         struct btrfs_key location;
1518         char *name;
1519         int name_len;
1520         int over = 0;
1521         unsigned char d_type;
1522
1523         if (list_empty(ins_list))
1524                 return 0;
1525
1526         /*
1527          * The data of a delayed item can no longer change, so we
1528          * need not lock the items. And since we hold i_mutex on the
1529          * directory, nobody can delete any directory index now.
1530          */
1531         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1532                 list_del(&curr->readdir_list);
1533
1534                 if (curr->key.offset < filp->f_pos) {
1535                         if (atomic_dec_and_test(&curr->refs))
1536                                 kfree(curr);
1537                         continue;
1538                 }
1539
1540                 filp->f_pos = curr->key.offset;
1541
1542                 di = (struct btrfs_dir_item *)curr->data;
1543                 name = (char *)(di + 1);
1544                 name_len = le16_to_cpu(di->name_len);
1545
1546                 d_type = btrfs_filetype_table[di->type];
1547                 btrfs_disk_key_to_cpu(&location, &di->location);
1548
1549                 over = filldir(dirent, name, name_len, curr->key.offset,
1550                                location.objectid, d_type);
1551
1552                 if (atomic_dec_and_test(&curr->refs))
1553                         kfree(curr);
1554
1555                 if (over)
1556                         return 1;
1557         }
1558         return 0;
1559 }
1560
1561 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
1562                          generation, 64);
1563 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
1564                          sequence, 64);
1565 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
1566                          transid, 64);
1567 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
1568 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
1569                          nbytes, 64);
1570 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
1571                          block_group, 64);
1572 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
1573 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
1574 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
1575 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
1576 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
1577 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
1578
1579 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
1580 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
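
/*
 * Editorial note (not part of the original file): BTRFS_SETGET_STACK_FUNCS
 * is the accessor generator from ctree.h.  For an in-memory ("stack")
 * structure it expands to roughly the getter/setter pair below, converting
 * between CPU and little-endian on-disk byte order.  Shown for the
 * stack_inode_uid case as a sketch; the exact expansion lives in ctree.h.
 */
#if 0
static inline u32 btrfs_stack_inode_uid(struct btrfs_inode_item *s)
{
	return le32_to_cpu(s->uid);
}

static inline void btrfs_set_stack_inode_uid(struct btrfs_inode_item *s,
					     u32 val)
{
	s->uid = cpu_to_le32(val);
}
#endif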
1581
1582 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1583                                   struct btrfs_inode_item *inode_item,
1584                                   struct inode *inode)
1585 {
1586         btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
1587         btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
1588         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1589         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1590         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1591         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1592         btrfs_set_stack_inode_generation(inode_item,
1593                                          BTRFS_I(inode)->generation);
1594         btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
1595         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1596         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1597         btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1598         btrfs_set_stack_inode_block_group(inode_item, 0);
1599
1600         btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1601                                      inode->i_atime.tv_sec);
1602         btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1603                                       inode->i_atime.tv_nsec);
1604
1605         btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1606                                      inode->i_mtime.tv_sec);
1607         btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1608                                       inode->i_mtime.tv_nsec);
1609
1610         btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1611                                      inode->i_ctime.tv_sec);
1612         btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1613                                       inode->i_ctime.tv_nsec);
1614 }
1615
1616 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1617                                struct btrfs_root *root, struct inode *inode)
1618 {
1619         struct btrfs_delayed_node *delayed_node;
1620         int ret = 0;
1621
1622         delayed_node = btrfs_get_or_create_delayed_node(inode);
1623         if (IS_ERR(delayed_node))
1624                 return PTR_ERR(delayed_node);
1625
1626         mutex_lock(&delayed_node->mutex);
1627         if (delayed_node->inode_dirty) {
1628                 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1629                 goto release_node;
1630         }
1631
1632         ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1633         /*
1634          * We must have reserved enough space when we started the new
1635          * transaction, so a metadata reservation failure is impossible.
1636          */
1637         BUG_ON(ret);
1638
1639         fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1640         delayed_node->inode_dirty = 1;
1641         delayed_node->count++;
1642         atomic_inc(&root->fs_info->delayed_root->items);
1643 release_node:
1644         mutex_unlock(&delayed_node->mutex);
1645         btrfs_release_delayed_node(delayed_node);
1646         return ret;
1647 }
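
/*
 * Editorial sketch (not part of the original file): a caller inside a
 * running transaction can use btrfs_delayed_update_inode() to stage the
 * inode item update in memory instead of touching the fs tree right away;
 * the delayed-node machinery writes it back later in one batch.  The
 * sketch function name is hypothetical.
 */
#if 0
static int update_inode_sketch(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	/*
	 * Stage the new stack inode item in the delayed node; a real
	 * caller would fall back to a direct tree update if the delayed
	 * path cannot be used.
	 */
	return btrfs_delayed_update_inode(trans, root, inode);
}
#endif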
1648
1649 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1650 {
1651         struct btrfs_root *root = delayed_node->root;
1652         struct btrfs_delayed_item *curr_item, *prev_item;
1653
1654         mutex_lock(&delayed_node->mutex);
1655         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1656         while (curr_item) {
1657                 btrfs_delayed_item_release_metadata(root, curr_item);
1658                 prev_item = curr_item;
1659                 curr_item = __btrfs_next_delayed_item(prev_item);
1660                 btrfs_release_delayed_item(prev_item);
1661         }
1662
1663         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1664         while (curr_item) {
1665                 btrfs_delayed_item_release_metadata(root, curr_item);
1666                 prev_item = curr_item;
1667                 curr_item = __btrfs_next_delayed_item(prev_item);
1668                 btrfs_release_delayed_item(prev_item);
1669         }
1670
1671         if (delayed_node->inode_dirty) {
1672                 btrfs_delayed_inode_release_metadata(root, delayed_node);
1673                 btrfs_release_delayed_inode(delayed_node);
1674         }
1675         mutex_unlock(&delayed_node->mutex);
1676 }
1677
1678 void btrfs_kill_delayed_inode_items(struct inode *inode)
1679 {
1680         struct btrfs_delayed_node *delayed_node;
1681
1682         delayed_node = btrfs_get_delayed_node(inode);
1683         if (!delayed_node)
1684                 return;
1685
1686         __btrfs_kill_delayed_node(delayed_node);
1687         btrfs_release_delayed_node(delayed_node);
1688 }
1689
1690 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1691 {
1692         u64 inode_id = 0;
1693         struct btrfs_delayed_node *delayed_nodes[8];
1694         int i, n;
1695
1696         while (1) {
1697                 spin_lock(&root->inode_lock);
1698                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1699                                            (void **)delayed_nodes, inode_id,
1700                                            ARRAY_SIZE(delayed_nodes));
1701                 if (!n) {
1702                         spin_unlock(&root->inode_lock);
1703                         break;
1704                 }
1705
1706                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1707
1708                 for (i = 0; i < n; i++)
1709                         atomic_inc(&delayed_nodes[i]->refs);
1710                 spin_unlock(&root->inode_lock);
1711
1712                 for (i = 0; i < n; i++) {
1713                         __btrfs_kill_delayed_node(delayed_nodes[i]);
1714                         btrfs_release_delayed_node(delayed_nodes[i]);
1715                 }
1716         }
1717 }
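
/*
 * Editorial note (not part of the original file): the loop above is the
 * standard batched radix-tree walk.  Reduced to its skeleton: look up at
 * most ARRAY_SIZE(batch) entries starting at "start", take references
 * under the lock, advance "start" past the last key returned so the next
 * pass makes progress, then operate on the batch with the lock dropped.
 * key_of() and process() below are hypothetical placeholders.
 */
#if 0
static void gang_lookup_sketch(struct radix_tree_root *tree, spinlock_t *lock)
{
	void *batch[8];
	unsigned long start = 0;
	int i, n;

	while (1) {
		spin_lock(lock);
		n = radix_tree_gang_lookup(tree, batch, start,
					   ARRAY_SIZE(batch));
		if (!n) {
			spin_unlock(lock);
			break;
		}
		/* Advance past the last entry returned in this batch. */
		start = key_of(batch[n - 1]) + 1;
		spin_unlock(lock);

		for (i = 0; i < n; i++)
			process(batch[i]);
	}
}
#endif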