fs/btrfs/delayed-inode.c
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK         400
#define BTRFS_DELAYED_BACKGROUND        100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void btrfs_delayed_inode_exit(void)
{
        if (delayed_node_cache)
                kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        atomic_set(&delayed_node->refs, 0);
        delayed_node->count = 0;
        delayed_node->in_list = 0;
        delayed_node->inode_dirty = 0;
        delayed_node->ins_root = RB_ROOT;
        delayed_node->del_root = RB_ROOT;
        mutex_init(&delayed_node->mutex);
        delayed_node->index_cnt = 0;
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
        delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
                                                        struct btrfs_root *root)
{
        return root->fs_info->delayed_root;
}

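/*
 * Look up the delayed node of an inode, creating and caching one if it
 * does not exist yet. The lockless fast path relies on the node being
 * cached in the btrfs inode; on the slow path we take root->inode_lock
 * and consult the radix tree, retrying if we race with another inserter.
 * Every returned node carries a reference for the caller.
 */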
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(inode);
        int ret;

again:
        node = ACCESS_ONCE(btrfs_inode->delayed_node);
        if (node) {
                atomic_inc(&node->refs);        /* can be accessed */
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
                        spin_unlock(&root->inode_lock);
                        goto again;
                }
                btrfs_inode->delayed_node = node;
                atomic_inc(&node->refs);        /* can be accessed */
                atomic_inc(&node->refs);        /* cached in the inode */
                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        atomic_inc(&node->refs);        /* cached in the btrfs inode */
        atomic_inc(&node->refs);        /* can be accessed */

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                kmem_cache_free(delayed_node_cache, node);
                spin_unlock(&root->inode_lock);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}

/*
 * Must be called while holding delayed_node->mutex.
 *
 * If mod = 1, also add this node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                atomic_inc(&node->refs);        /* inserted into list */
                root->nodes++;
                node->in_list = 1;
        }
        spin_unlock(&root->lock);
}

/* Must be called while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                root->nodes--;
                atomic_dec(&node->refs);        /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                node->in_list = 0;
        }
        spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!node->in_list) {   /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

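/*
 * Drop a reference on a delayed node, requeueing or dequeueing it first
 * depending on whether it still holds items. When the last reference is
 * dropped, the ref count is rechecked under root->inode_lock before the
 * node is deleted from the radix tree and freed, so a concurrent lookup
 * that bumped the count in the meantime stays safe.
 */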
static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (atomic_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;
                spin_lock(&root->inode_lock);
                if (atomic_read(&delayed_node->refs) == 0) {
                        radix_tree_delete(&root->delayed_nodes_tree,
                                          delayed_node->inode_id);
                        kmem_cache_free(delayed_node_cache, delayed_node);
                }
                spin_unlock(&root->inode_lock);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

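/*
 * Allocate a delayed item; the item data is placed directly after the
 * struct, so one allocation covers both. Returns NULL on failure.
 */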
struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;
        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->block_rsv = NULL;
                item->delayed_node = NULL;
                atomic_set(&item->refs, 1);
        }
        return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:         the rb-root of the delayed node to search in
 * @key:          the key to look up
 * @prev:         used to store the prev item if the right item isn't found
 * @next:         used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}

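/*
 * Link a delayed item into the insertion or deletion rb-tree of its
 * delayed node, keyed by btrfs key order. For dir index insertions we
 * also push index_cnt past the new offset so future indexes stay unique.
 */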
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root *root;
        struct btrfs_delayed_item *item;
        int cmp;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                 rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0)
                        p = &(*p)->rb_right;
                else if (cmp > 0)
                        p = &(*p)->rb_left;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root *root;
        struct btrfs_delayed_root *delayed_root;

        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;
        atomic_dec(&delayed_root->items);
        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
            waitqueue_active(&delayed_root->wait))
                wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (atomic_dec_and_test(&item->refs))
                        kfree(item);
        }
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_inode->delayed_node;
        if (delayed_node)
                atomic_inc(&delayed_node->refs);

        return delayed_node;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                   u64 root_id)
{
        struct btrfs_key root_key;

        if (root->objectid == root_id)
                return root;

        root_key.objectid = root_id;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

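/*
 * Metadata for a delayed item is reserved up front by migrating bytes
 * from the transaction's block reservation into the global reserve, so
 * the item can be flushed later without risking running out of space.
 */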
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret) {
                item->bytes_reserved = num_bytes;
                item->block_rsv = dst_rsv;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        if (!item->bytes_reserved)
                return;

        btrfs_block_rsv_release(root, item->block_rsv,
                                item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret)
                node->bytes_reserved = num_bytes;

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &root->fs_info->global_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}

/*
 * This helper inserts a batch of contiguous items into the same leaf,
 * limited by the amount of free space in the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path,
                                struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        int free_space;
        int total_data_size = 0, total_size = 0;
        struct extent_buffer *leaf;
        char *data_ptr;
        struct btrfs_key *keys;
        u32 *data_size;
        struct list_head head;
        int slot;
        int nitems;
        int i;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];
        free_space = btrfs_leaf_free_space(root, leaf);
        INIT_LIST_HEAD(&head);

        next = item;
        nitems = 0;

        /*
         * count the number of contiguous items that we can insert in batch
         */
        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
               free_space) {
                total_data_size += next->data_len;
                total_size += next->data_len + sizeof(struct btrfs_item);
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;
        }

        if (!nitems) {
                ret = 0;
                goto out;
        }

        /*
         * we need to allocate some memory, which might cause the task to
         * sleep, so set all locked nodes in the path to blocking locks
         * first.
         */
        btrfs_set_path_blocking(path);

        keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
        if (!keys) {
                ret = -ENOMEM;
                goto out;
        }

        data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
        if (!data_size) {
                ret = -ENOMEM;
                goto error;
        }

        /* get keys of all the delayed items */
        i = 0;
        list_for_each_entry(next, &head, tree_list) {
                keys[i] = next->key;
                data_size[i] = next->data_len;
                i++;
        }

        /* reset all the locked nodes in the path to spinning locks. */
        btrfs_clear_path_blocking(path, NULL);

        /* insert the keys of the items */
        ret = setup_items_for_insert(trans, root, path, keys, data_size,
                                     total_data_size, total_size, nitems);
        if (ret)
                goto error;

        /* insert the dir index items */
        slot = path->slots[0];
        list_for_each_entry_safe(curr, next, &head, tree_list) {
                data_ptr = btrfs_item_ptr(leaf, slot, char);
                write_extent_buffer(leaf, &curr->data,
                                    (unsigned long)data_ptr,
                                    curr->data_len);
                slot++;

                btrfs_delayed_item_release_metadata(root, curr);

                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

error:
        kfree(data_size);
        kfree(keys);
out:
        return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an item
 * for new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        struct btrfs_item *item;
        char *ptr;
        int ret;

        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        item = btrfs_item_nr(leaf, path->slots[0]);
        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}

/*
 * We insert one item first, then if there are contiguous items following
 * it, we try to insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(trans, root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}

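/*
 * Delete a batch of contiguous dir index items from the leaf that the
 * path currently points to, then release the matching delayed items.
 */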
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT; /* FIXME: Is errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * count the number of dir index items that we can delete in batch
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * we can't find the item that this delayed item points
                 * to, so it is stale; just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr)
                        goto do_again;
                else
                        goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}

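/*
 * Clear the dirty-inode state of a delayed node and drop its item count,
 * waking up waiters once the global item count falls back below the
 * background threshold. Its callers here hold delayed_node->mutex.
 */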
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node && delayed_node->inode_dirty) {
                BUG_ON(!delayed_node->root);
                delayed_node->inode_dirty = 0;
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                atomic_dec(&delayed_root->items);
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND &&
                    waitqueue_active(&delayed_root->wait))
                        wake_up(&delayed_root->wait);
        }
}

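/*
 * Flush the delayed inode item to disk: look up the BTRFS_INODE_ITEM_KEY
 * for this inode, overwrite the on-disk item with the cached copy, then
 * release the reserved metadata and the dirty-inode state.
 */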
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int ret;

        mutex_lock(&node->mutex);
        if (!node->inode_dirty) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        key.objectid = node->inode_id;
        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
        key.offset = 0;
        ret = btrfs_lookup_inode(trans, root, path, &key, 1);
        if (ret > 0) {
                btrfs_release_path(path);
                mutex_unlock(&node->mutex);
                return -ENOENT;
        } else if (ret < 0) {
                mutex_unlock(&node->mutex);
                return ret;
        }

        btrfs_unlock_up_safe(path, 1);
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        btrfs_delayed_inode_release_metadata(root, node);
        btrfs_release_delayed_inode(node);
        mutex_unlock(&node->mutex);

        return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        delayed_root = btrfs_get_delayed_root(root);

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node) {
                root = curr_node->root;
                ret = btrfs_insert_delayed_items(trans, path, root,
                                                 curr_node);
                if (!ret)
                        ret = btrfs_delete_delayed_items(trans, path, root,
                                                         curr_node);
                if (!ret)
                        ret = btrfs_update_delayed_inode(trans, root, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        btrfs_free_path(path);
        return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                              struct btrfs_delayed_node *node)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        btrfs_free_path(path);

        return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

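/*
 * Detach the delayed node from its btrfs inode and drop the reference
 * the inode was holding on it. No locking is taken here, so this is
 * presumably only safe once nobody else can reach the inode, e.g. when
 * the inode is being destroyed.
 */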
void btrfs_remove_delayed_node(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
        if (!delayed_node)
                return;

        BTRFS_I(inode)->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
        struct btrfs_root *root;
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_work work;
};

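/*
 * Worker callback: flush one prepared delayed node inside a joined
 * transaction, then either requeue the work (if new items showed up in
 * the meantime) or drop the prepared node and free the async helper.
 */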
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        unsigned long nr = 0;
        int need_requeue = 0;
        int ret;

        async_node = container_of(work, struct btrfs_async_delayed_node, work);

        path = btrfs_alloc_path();
        if (!path)
                goto out;
        path->leave_spinning = 1;

        delayed_node = async_node->delayed_node;
        root = delayed_node->root;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto free_path;

        ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, root,
                                                 delayed_node);

        if (!ret)
                btrfs_update_delayed_inode(trans, root, path, delayed_node);

        /*
         * New delayed items may have been inserted in the meantime, so we
         * need to requeue the work. Besides that, we must dequeue empty
         * delayed nodes to avoid a race between the delayed-item balance
         * path and the worker. The race looks like this:
         *      Task1                           Worker thread
         *                                      count == 0, no need to
         *                                        requeue or re-insert the
         *                                        delayed node into the
         *                                        prepare list.
         *      add lots of delayed items
         *      queue the delayed node
         *        already in the node list,
         *        but not in the prepare
         *        list, so the node looks
         *        like it is being handled
         *        by the worker.
         *      do delayed items balance
         *        the delayed node is being
         *        dealt with by the worker
         *        now, just wait.
         *                                      the worker goes idle.
         * Task1 would then sleep until the transaction is committed.
         */
        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                need_requeue = 1;
        else
                btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
                                           delayed_node);
        mutex_unlock(&delayed_node->mutex);

        nr = trans->blocks_used;

        btrfs_end_transaction_dmeta(trans, root);
        __btrfs_btree_balance_dirty(root, nr);
free_path:
        btrfs_free_path(path);
out:
        if (need_requeue)
                btrfs_requeue_work(&async_node->work);
        else {
                btrfs_release_prepared_delayed_node(delayed_node);
                kfree(async_node);
        }
}

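/*
 * Hand prepared delayed nodes over to the delayed-workers queue. With
 * "all" set we keep going until the prepare list is drained; otherwise
 * we stop after queueing four nodes.
 */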
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_root *root, int all)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_delayed_node *curr;
        int count = 0;

again:
        curr = btrfs_first_prepared_delayed_node(delayed_root);
        if (!curr)
                return 0;

        async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
        if (!async_node) {
                btrfs_release_prepared_delayed_node(curr);
                return -ENOMEM;
        }

        async_node->root = root;
        async_node->delayed_node = curr;

        async_node->work.func = btrfs_async_run_delayed_node_done;
        async_node->work.flags = 0;

        btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
        count++;

        if (all || count < 4)
                goto again;

        return 0;
}

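/*
 * Throttle delayed-item growth: once the global count passes the
 * background threshold we kick the async workers, and past the
 * writeback threshold we also wait (up to one second) for the count
 * to drop back below the background level.
 */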
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;

        delayed_root = btrfs_get_delayed_root(root);

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int ret;
                ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
                if (ret)
                        return;

                wait_event_interruptible_timeout(
                                delayed_root->wait,
                                (atomic_read(&delayed_root->items) <
                                 BTRFS_DELAYED_BACKGROUND),
                                HZ);
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

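/*
 * Record a new dir index item in the delayed node of the directory
 * instead of inserting it into the b-tree right away. The dir item is
 * built in the delayed item's data area and added to the insertion tree.
 */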
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, const char *name,
                                   int name_len, struct inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
        /*
         * we reserved enough space when we started the transaction, so
         * a metadata reservation failure here should be impossible
         */
        BUG_ON(ret);

        delayed_item->key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        dir_item->transid = cpu_to_le64(trans->transid);
        dir_item->data_len = 0;
        dir_item->name_len = cpu_to_le16(name_len);
        dir_item->type = type;
        memcpy((char *)(dir_item + 1), name, name_len);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                printk(KERN_ERR "error adding delayed dir index item "
                                "(name: %s) into the insertion tree of the "
                                "delayed node (root id: %llu, inode id: %llu, "
                                "errno: %d)\n",
                                name,
                                (unsigned long long)delayed_node->root->objectid,
                                (unsigned long long)delayed_node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

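/*
 * Delete a dir index item via the delayed node: if a not-yet-inserted
 * delayed insertion with the same key exists, cancelling it is enough;
 * otherwise queue a delayed deletion item for the on-disk index.
 */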
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, struct inode *dir,
                                   u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        struct btrfs_key item_key;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        item_key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
        item_key.offset = index;

        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->key = item_key;

        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
        /*
         * we reserved enough space when we started the transaction, so
         * a metadata reservation failure here should be impossible.
         */
        BUG_ON(ret);

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_deletion_item(node, item);
        if (unlikely(ret)) {
                printk(KERN_ERR "error adding delayed dir index item "
                                "(index: %llu) into the deletion tree of the "
                                "delayed node (root id: %llu, inode id: %llu, "
                                "errno: %d)\n",
                                (unsigned long long)index,
                                (unsigned long long)node->root->objectid,
                                (unsigned long long)node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
        int ret = 0;

        if (!delayed_node)
                return -ENOENT;

        /*
         * Since we hold the i_mutex of this directory, no new directory
         * index can be added to the delayed node and index_cnt cannot be
         * updated now. So we needn't lock the delayed node.
         */
        if (!delayed_node->index_cnt)
                return -EINVAL;

        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
        return ret;
}

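/*
 * Snapshot the pending insertion and deletion items of a directory for
 * readdir, taking a reference on each item so they stay alive while the
 * caller walks the lists.
 */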
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so its
         * ref count must be > 1 now, and we needn't check whether it is
         * about to be freed.
         *
         * Besides that, this function is used for readdir, and we do not
         * insert/delete delayed items in this period. So we also needn't
         * requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
}

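/*
 * During readdir, report whether the given index is covered by a pending
 * delayed deletion. The del_list is sorted by index, so entries below
 * the current index can be dropped as we go.
 */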
int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr, *next;
        int ret;

        if (list_empty(del_list))
                return 0;

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;

                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (ret)
                        return 1;
                else
                        continue;
        }
        return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
                                    filldir_t filldir,
                                    struct list_head *ins_list)
{
        struct btrfs_dir_item *di;
        struct btrfs_delayed_item *curr, *next;
        struct btrfs_key location;
        char *name;
        int name_len;
        int over = 0;
        unsigned char d_type;

        if (list_empty(ins_list))
                return 0;

        /*
         * The data of a delayed item never changes, so we needn't lock
         * it. And since we hold the i_mutex of the directory, nobody can
         * delete any directory index now.
         */
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);

                if (curr->key.offset < filp->f_pos) {
                        if (atomic_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }

                filp->f_pos = curr->key.offset;

                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
                name_len = le16_to_cpu(di->name_len);

                d_type = btrfs_filetype_table[di->type];
                btrfs_disk_key_to_cpu(&location, &di->location);

                over = filldir(dirent, name, name_len, curr->key.offset,
                               location.objectid, d_type);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (over)
                        return 1;
        }
        return 0;
}

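/*
 * The BTRFS_SETGET_STACK_FUNCS() macros (see ctree.h) generate plain
 * get/set helpers such as btrfs_set_stack_inode_uid() that operate on
 * a structure held in ordinary memory, converting to and from the
 * on-disk little-endian layout, unlike the regular accessors that
 * work on mapped extent buffers.
 */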
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
                         generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
                         sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
                         transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
                         nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
                         block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

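/*
 * Copy the current in-memory inode state into the stack inode_item
 * cached in the delayed node, using the stack accessors generated
 * above, so it can later be written back to the inode item in the
 * fs tree when the delayed node is processed.
 */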
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
{
        btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
        btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
        btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);

        btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
                                     inode->i_atime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
                                      inode->i_atime.tv_nsec);

        btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
                                     inode->i_mtime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
                                      inode->i_mtime.tv_nsec);

        btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
                                     inode->i_ctime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
                                      inode->i_ctime.tv_nsec);
}

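/*
 * Record an inode update in the delayed node instead of updating the
 * fs tree directly.  If the node already carries a dirty copy of the
 * inode item, just refresh it; otherwise reserve metadata space first
 * and then mark the node dirty so the commit path writes it back.
 */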
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->inode_dirty) {
                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
                goto release_node;
        }

        ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
        /*
         * Enough metadata space must have been reserved when the
         * transaction was started, so the reservation cannot fail here.
         */
        BUG_ON(ret);

        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
        delayed_node->inode_dirty = 1;
        delayed_node->count++;
        atomic_inc(&root->fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

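/*
 * Throw away everything queued on a delayed node: release the metadata
 * reservations of all pending insertion and deletion items, drop the
 * items themselves, and clear the dirty inode item if there is one.
 * The caller must hold a reference on @delayed_node.
 */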
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_root *root = delayed_node->root;
        struct btrfs_delayed_item *curr_item, *prev_item;

        mutex_lock(&delayed_node->mutex);
        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        if (delayed_node->inode_dirty) {
                btrfs_delayed_inode_release_metadata(root, delayed_node);
                btrfs_release_delayed_inode(delayed_node);
        }
        mutex_unlock(&delayed_node->mutex);
}

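/*
 * Discard every pending delayed item of one inode, e.g. when the inode
 * is being evicted and its queued updates no longer matter.  A no-op if
 * the inode has no delayed node.
 */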
void btrfs_kill_delayed_inode_items(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        __btrfs_kill_delayed_node(delayed_node);
        btrfs_release_delayed_node(delayed_node);
}

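/*
 * Kill every delayed node in the root, walking delayed_nodes_tree in
 * batches of up to 8 nodes.  Extra refs are taken on each batch under
 * inode_lock so the nodes cannot vanish while we process them with the
 * lock dropped, and advancing inode_id past the last node found
 * guarantees forward progress.
 */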
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
        u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
        int i, n;

        while (1) {
                spin_lock(&root->inode_lock);
                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
                                           (void **)delayed_nodes, inode_id,
                                           ARRAY_SIZE(delayed_nodes));
                if (!n) {
                        spin_unlock(&root->inode_lock);
                        break;
                }

                inode_id = delayed_nodes[n - 1]->inode_id + 1;

                for (i = 0; i < n; i++)
                        atomic_inc(&delayed_nodes[i]->refs);
                spin_unlock(&root->inode_lock);

                for (i = 0; i < n; i++) {
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
        }
}