/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;
	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* Need to make sure the commit_root doesn't disappear under us. */
	mutex_lock(&root->fs_commit_mutex);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (btrfs_header_nritems(leaf) == 0) {
					WARN_ON(1);
					break;
				}

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->cache_progress = last;
				mutex_unlock(&root->fs_commit_mutex);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;
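		/*
		 * A gap between consecutive inode items marks free inode
		 * numbers. Hypothetical example (not from the original
		 * source): if last == 260 and key.objectid == 265, inode
		 * numbers 261-264 are unused, so a chunk of 4 numbers
		 * starting at 261 is added to the free-ino cache below.
		 */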
		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}
	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->cache_lock);
	root->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->cache_lock);

	root->cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->cache_wait);
	mutex_unlock(&root->fs_commit_mutex);

	btrfs_free_path(path);

	return ret;
}
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

	spin_lock(&root->cache_lock);
	if (root->cached != BTRFS_CACHE_NO) {
		spin_unlock(&root->cache_lock);
		return;
	}

	root->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&root->cache_lock);

	ret = load_free_ino_cache(root->fs_info, root);
	if (ret == 1) {
		spin_lock(&root->cache_lock);
		root->cached = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->cache_lock);
		return;
	}
	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the tree for inode items, and that can keep the ino
	 * allocation path waiting. Therefore at start we quickly find
	 * out the highest inode number, and we know we can safely use
	 * inode numbers that fall in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
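	/*
	 * Illustrative example (figures are hypothetical): if the highest
	 * in-use inode number is 1000, the whole range
	 * [1001, BTRFS_LAST_FREE_OBJECTID] becomes available immediately,
	 * while the kthread fills in the gaps below 1001 in the background.
	 */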
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
}
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->cache_wait,
		   root->cached == BTRFS_CACHE_FINISHED ||
		   root->free_ino_ctl->free_space > 0);

	if (root->cached == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else
		goto again;
}
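/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	u64 ino;
 *	int err = btrfs_find_free_ino(root, &ino);
 *
 *	if (err)
 *		return err;
 *	... create the new inode with number 'ino', and on failure hand
 *	the number back with btrfs_return_ino(root, ino) ...
 */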
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

again:
	if (root->cached == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(pinned, objectid, 1);
	} else {
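		/*
		 * While caching is still in progress the number is parked
		 * in the pinned tree; moving it straight into the free_ino
		 * tree could let the scanning kthread hand it out twice.
		 */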
		mutex_lock(&root->fs_commit_mutex);
		spin_lock(&root->cache_lock);
		if (root->cached == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->cache_lock);
			mutex_unlock(&root->fs_commit_mutex);
			goto again;
		}
		spin_unlock(&root->cache_lock);

		start_caching(root);

		__btrfs_add_free_space(pinned, objectid, 1);

		mutex_unlock(&root->fs_commit_mutex);
	}
}
/*
 * When a transaction is committed, we'll move those inode numbers which
 * are no bigger than root->cache_progress from the pinned tree to the
 * free_ino tree, and the others will just be dropped, because the commit
 * root we were searching has changed.
 *
 * Must be called with root->fs_commit_mutex held.
 */
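/*
 * Hypothetical example: with cache_progress == 100, a pinned chunk
 * covering numbers 90-120 (offset 90, bytes 31) is split: count becomes
 * 100 - 90 + 1 = 11, so numbers 90-100 move to the free_ino tree and
 * 101-120 are dropped.
 */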
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

	while (1) {
		bool add_to_ctl = true;

		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->cache_progress)
			add_to_ctl = false;
		else if (info->offset + info->bytes > root->cache_progress)
			count = root->cache_progress - info->offset + 1;
		else
			count = info->bytes;

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);

		if (add_to_ctl)
			__btrfs_add_free_space(ctl, info->offset, count);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}
#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
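/*
 * Worked example with hypothetical figures: on 4 KiB pages with, say, a
 * 48-byte struct btrfs_free_space, INIT_THRESHOLD is (32768 / 2) / 48 =
 * 341 extent entries (~16 KiB worth), and each bitmap covers
 * 4096 * 8 = 32768 inode numbers.
 */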
/*
 * The goal is to keep the memory used by the free_ino tree from
 * exceeding what we would use if we stored everything in bitmaps.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing a precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_CACHE_SIZE / sizeof(*info);
}
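/*
 * Illustrative arithmetic (same hypothetical 4 KiB page / 48-byte entry
 * figures as above): if covering max_ino would take 5 bitmaps and 2 are
 * already allocated, the threshold becomes 3 * 4096 / 48 = 256 extent
 * entries, i.e. the same memory the 3 remaining bitmaps would occupy.
 */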
/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}
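/*
 * E.g. with the hypothetical 4 KiB page, INODES_PER_BITMAP / 10 is 3276,
 * so a run of more than 3276 consecutive free inode numbers always stays
 * a single extent entry instead of being scattered into bitmap bits.
 */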
static struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;

	/*
	 * Initially we allow using 16 KiB of RAM to cache chunks of
	 * inode numbers before we resort to bitmaps. This is somewhat
	 * arbitrary, but it will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvol/snap trees need an ino cache. */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save the inode cache if we are deleting this root. */
	if (btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root)
		return 0;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 3 items for inode item update (in the worst case)
	 * 1 item for the free space object
	 * 3 items for pre-allocation
	 * = 8 items in total
	 */
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
					  trans->bytes_reserved);
	if (ret)
		goto out;
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&root->cache_lock);
	if (root->cached != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->cache_lock);
		goto out_put;
	}
	spin_unlock(&root->cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space. */
	prealloc += 8 * PAGE_CACHE_SIZE;
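	/*
	 * Hypothetical sizing example (4 KiB pages, 48-byte entries): with
	 * 100 extent entries and 1 bitmap, prealloc = ALIGN(4800, 4096) +
	 * 4096 = 12288 bytes, and the 8-page safety margin brings it to
	 * 45056 bytes.
	 */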
	ret = btrfs_delalloc_reserve_space(inode, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_space(inode, prealloc);
		goto out_put;
	}
	btrfs_free_reserved_data_space(inode, prealloc);

	ret = btrfs_write_out_ino_cache(root, trans, path);
out_put:
	iput(inode);
out_release:
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	return ret;
}
static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
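	/*
	 * This key sorts after every possible inode item, so the search
	 * lands just past the last item in the tree; the slot before it
	 * (if any) therefore holds the highest objectid in use.
	 */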
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
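/*
 * Hand out the next objectid above the current highest one, lazily
 * initializing root->highest_objectid from the tree on first use.
 */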
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		if (ret)
			goto out;
	}

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}