/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * uptodate.c
 *
 * Tracking the up-to-date-ness of a local buffer_head with respect to
 * the cluster.
 *
 * Copyright (C) 2002, 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Standard buffer head caching flags (uptodate, etc) are insufficient
 * in a clustered environment - a buffer may be marked up to date on
 * our local node but could have been modified by another cluster
 * member. As a result an additional (and performant) caching scheme
 * is required. A further requirement is that we consume as little
 * memory as possible - we never pin buffer_head structures in order
 * to track them.
 *
 * We track the existence of up to date buffers on the inodes which
 * are associated with them. Because we don't want to pin
 * buffer_heads, this is only a (strong) hint and several other checks
 * are made in the I/O path to ensure that we don't use a stale or
 * invalid buffer without going to disk:
 *	- buffer_jbd is used liberally - if a bh is in the journal on
 *	  this node then it *must* be up to date.
 *	- the standard buffer_uptodate() macro is used to detect buffers
 *	  which may be invalid (even if we have an up to date tracking
 *	  value for it)
 *
 * For a full understanding of how this code works together, one
 * should read the callers in dlmglue.c, the I/O functions in
 * buffer_head_io.c and ocfs2_journal_access in journal.c
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>
#ifndef CONFIG_OCFS2_COMPAT_JBD
# include <linux/jbd2.h>
#else
# include <linux/jbd.h>
#endif

#define MLOG_MASK_PREFIX ML_UPTODATE

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "inode.h"
#include "uptodate.h"

struct ocfs2_meta_cache_item {
	struct rb_node	c_node;
	sector_t	c_block;
};

static struct kmem_cache *ocfs2_uptodate_cachep = NULL;

static u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);
	return ci->ci_ops->co_owner(ci);
}

static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);
	ci->ci_ops->co_cache_lock(ci);
}

static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);
	ci->ci_ops->co_cache_unlock(ci);
}

static void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);
	ci->ci_ops->co_io_lock(ci);
}

static void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);
	ci->ci_ops->co_io_unlock(ci);
}

void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
			       const struct ocfs2_caching_operations *ops)
{
	BUG_ON(!ops);
	ci->ci_ops = ops;
	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
	ci->ci_num_cached = 0;
}

/* No lock taken here as 'root' is not expected to be visible to other
 * processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
	unsigned int purged = 0;
	struct rb_node *node;
	struct ocfs2_meta_cache_item *item;

	while ((node = rb_last(root)) != NULL) {
		item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);

		mlog(0, "Purge item %llu\n",
		     (unsigned long long) item->c_block);

		rb_erase(&item->c_node, root);
		kmem_cache_free(ocfs2_uptodate_cachep, item);

		purged++;
	}
	return purged;
}

/* Called from locking and called from ocfs2_clear_inode.  Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	unsigned int tree, to_purge, purged;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
	struct rb_root root = RB_ROOT;

	BUG_ON(!ci || !ci->ci_ops);

	ocfs2_metadata_cache_lock(ci);
	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
	to_purge = ci->ci_num_cached;

	mlog(0, "Purge %u %s items from Owner %llu\n", to_purge,
	     tree ? "tree" : "array",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci));

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache. We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	ocfs2_metadata_cache_init(ci, ci->ci_ops);
	ocfs2_metadata_cache_unlock(ci);

	purged = ocfs2_purge_copied_metadata_tree(&root);

	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors. Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
		     (unsigned long long)ocfs2_metadata_cache_owner(ci),
		     to_purge, purged);
}

/* Returns the index in the cache array, -1 if not found.
 * Requires ip_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
				    sector_t item)
{
	int i;

	for (i = 0; i < ci->ci_num_cached; i++) {
		if (item == ci->ci_cache.ci_array[i])
			return i;
	}
	return -1;
}

/* Returns the cache item if found, otherwise NULL.
 * Requires ip_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
			sector_t block)
{
	struct rb_node *n = ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *item = NULL;

	while (n) {
		item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);

		if (block < item->c_block)
			n = n->rb_left;
		else if (block > item->c_block)
			n = n->rb_right;
		else
			return item;
	}
	return NULL;
}

static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
			       struct buffer_head *bh)
{
	int index = -1;
	struct ocfs2_meta_cache_item *item = NULL;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	ocfs2_metadata_cache_lock(ci);

	mlog(0, "Owner %llu, query block %llu (inline = %u)\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long) bh->b_blocknr,
	     !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
		index = ocfs2_search_cache_array(ci, bh->b_blocknr);
	else
		item = ocfs2_search_cache_tree(ci, bh->b_blocknr);

	ocfs2_metadata_cache_unlock(ci);

	mlog(0, "index = %d, item = %p\n", index, item);

	return (index != -1) || (item != NULL);
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct inode *inode,
			  struct buffer_head *bh)
{
	/* Doesn't matter if the bh is in our cache or not -- if it's
	 * not marked uptodate then we know it can't have correct
	 * data. */
	if (!buffer_uptodate(bh))
		return 0;

	/* OCFS2 does not allow multiple nodes to be changing the same
	 * block at the same time. */
	if (buffer_jbd(bh))
		return 1;

	/* Ok, locally the buffer is marked as up to date, now search
	 * our cache to see if we can trust that. */
	return ocfs2_buffer_cached(OCFS2_I(inode), bh);
}

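/*
 * Illustrative sketch only -- not part of the ocfs2 I/O paths.  It
 * shows the intended usage pattern from a reader's point of view: only
 * trust a locally uptodate bh when the cluster-aware cache agrees,
 * otherwise read from disk and then record the fresh copy.  The helper
 * name is hypothetical; the real (and more careful) logic lives in
 * ocfs2_read_blocks() in buffer_head_io.c, which also serializes with
 * co_io_lock().
 */
static int __maybe_unused ocfs2_example_read_block(struct inode *inode,
						   struct buffer_head *bh)
{
	if (ocfs2_buffer_uptodate(inode, bh))
		return 0;	/* bh and the cluster cache both agree */

	lock_buffer(bh);
	get_bh(bh);	/* end_buffer_read_sync() drops this reference */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;

	/* Record that this node now holds a valid copy of the block. */
	ocfs2_set_buffer_uptodate(inode, bh);
	return 0;
}
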
/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * The caller should hold co_io_lock() to serialize submitters with the
 * logic here.
 */
int ocfs2_buffer_read_ahead(struct inode *inode,
			    struct buffer_head *bh)
{
	return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh);
}

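/*
 * Illustrative sketch only: a reader that finds a block out on
 * read-ahead can simply wait for that I/O to finish instead of issuing
 * its own.  ocfs2_read_blocks() does a more careful version of this
 * under co_io_lock(); the helper name is hypothetical.
 */
static inline void ocfs2_example_wait_on_read_ahead(struct inode *inode,
						    struct buffer_head *bh)
{
	if (ocfs2_buffer_read_ahead(inode, bh))
		wait_on_buffer(bh);	/* read-ahead completion unlocks bh */
}
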
/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
				     sector_t block)
{
	BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);

	mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
	     ci->ci_num_cached);

	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
	ci->ci_num_cached++;
}

/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
				      struct ocfs2_meta_cache_item *new)
{
	sector_t block = new->c_block;
	struct rb_node *parent = NULL;
	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *tmp;

	mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block,
	     ci->ci_num_cached);

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

		if (block < tmp->c_block)
			p = &(*p)->rb_left;
		else if (block > tmp->c_block)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate block %llu cached!\n",
			     (unsigned long long) block);
			BUG();
		}
	}

	rb_link_node(&new->c_node, parent, p);
	rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached++;
}

/* co_cache_lock() must be held */
static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi,
					     struct ocfs2_caching_info *ci)
{
	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}

/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
 * pointers in tree after we use them - this allows caller to detect
 * when to free in case of error.
 *
 * The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
			       struct ocfs2_meta_cache_item **tree)
{
	int i;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
			"Owner %llu, num cached = %u, should be %u\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Owner %llu not marked as inline anymore!\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci));

	/* Be careful to initialize the tree members *first* because
	 * once the ci_tree is used, the array is junk... */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
		tree[i]->c_block = ci->ci_cache.ci_array[i];

	ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
	ci->ci_cache.ci_tree = RB_ROOT;
	/* this will be set again by __ocfs2_insert_cache_tree */
	ci->ci_num_cached = 0;

	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
		__ocfs2_insert_cache_tree(ci, tree[i]);
		tree[i] = NULL;
	}

	mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     ci->ci_flags, ci->ci_num_cached);
}

/* Slow path function - memory allocation is necessary. See the
 * comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
					sector_t block,
					int expand_tree)
{
	int i;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
	struct ocfs2_meta_cache_item *new = NULL;
	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
		{ NULL, };

	mlog(0, "Owner %llu, block %llu, expand = %d\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)block, expand_tree);

	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
	if (!new) {
		mlog_errno(-ENOMEM);
		return;
	}
	new->c_block = block;

	if (expand_tree) {
		/* Do *not* allocate an array here - the removal code
		 * has no way of tracking that. */
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
						   GFP_NOFS);
			if (!tree[i]) {
				mlog_errno(-ENOMEM);
				goto out_free;
			}
			/* These are initialized in ocfs2_expand_cache! */
		}
	}

	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(oi, ci)) {
		mlog(0, "Someone cleared the tree underneath us\n");
		/* Ok, items were removed from the cache in between
		 * locks. Detect this and revert back to the fast path */
		ocfs2_append_cache_array(ci, block);
		ocfs2_metadata_cache_unlock(ci);
		goto out_free;
	}

	if (expand_tree)
		ocfs2_expand_cache(oi, tree);

	__ocfs2_insert_cache_tree(ci, new);
	ocfs2_metadata_cache_unlock(ci);

	new = NULL;
out_free:
	if (new)
		kmem_cache_free(ocfs2_uptodate_cachep, new);

	/* If these were used, then ocfs2_expand_cache re-set them to
	 * NULL for us. */
	if (tree[0]) {
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
			if (tree[i])
				kmem_cache_free(ocfs2_uptodate_cachep,
						tree[i]);
	}
}

/* Item insertion is guarded by co_io_lock(), so the insertion path takes
 * advantage of this by not rechecking for a duplicate insert during
 * the slow case. Additionally, if the cache needs to be bumped up to
 * a tree, the code will not recheck after acquiring the lock --
 * multiple paths cannot be expanding to a tree at the same time.
 *
 * The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory. In those cases, it reverts back to the fast
 * path.
 *
 * Note that this function may actually fail to insert the block if
 * memory cannot be allocated. This is not fatal however (but may
 * result in a performance penalty).
 *
 * Readahead buffers can be passed in here before the I/O request is
 * completed.
 */
void ocfs2_set_buffer_uptodate(struct inode *inode,
			       struct buffer_head *bh)
{
	int expand;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	/* The block may very well exist in our cache already, so avoid
	 * doing any more work in that case. */
	if (ocfs2_buffer_cached(oi, bh))
		return;

	mlog(0, "Owner %llu, inserting block %llu\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)bh->b_blocknr);

	/* No need to recheck under spinlock - insertion is guarded by
	 * co_io_lock() */
	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(oi, ci)) {
		/* Fast case - it's an array and there's a free
		 * spot. */
		ocfs2_append_cache_array(ci, bh->b_blocknr);
		ocfs2_metadata_cache_unlock(ci);
		return;
	}

	expand = 0;
	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		/* We need to bump things up to a tree. */
		expand = 1;
	}
	ocfs2_metadata_cache_unlock(ci);

	__ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
}

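/*
 * Illustrative sketch only: how an I/O path might record a batch of
 * buffers it just read while holding co_io_lock(), which is what lets
 * ocfs2_set_buffer_uptodate() skip the duplicate re-check described
 * above.  The helper name is hypothetical; see ocfs2_read_blocks() in
 * buffer_head_io.c for the real thing.
 */
static void __maybe_unused
ocfs2_example_mark_read_buffers(struct inode *inode,
				struct buffer_head *bhs[], int nr)
{
	int i;
	struct ocfs2_caching_info *ci = &OCFS2_I(inode)->ip_metadata_cache;

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (buffer_uptodate(bhs[i]))
			ocfs2_set_buffer_uptodate(inode, bhs[i]);
	}
	ocfs2_metadata_cache_io_unlock(ci);
}
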
/* Called against a newly allocated buffer. Most likely nobody should
 * be able to read this sort of metadata while it's still being
 * allocated, but this is careful to take co_io_lock() anyway. */
void ocfs2_set_new_buffer_uptodate(struct inode *inode,
				   struct buffer_head *bh)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	/* This should definitely *not* exist in our cache */
	BUG_ON(ocfs2_buffer_cached(oi, bh));

	set_buffer_uptodate(bh);

	ocfs2_metadata_cache_io_lock(ci);
	ocfs2_set_buffer_uptodate(inode, bh);
	ocfs2_metadata_cache_io_unlock(ci);
}

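/*
 * Illustrative sketch only: the expected pattern when a brand new
 * metadata block is allocated and built up in memory rather than read
 * from disk.  sb_getblk() and the zeroing below stand in for whatever
 * the allocating code really does before journaling the buffer; the
 * helper name is hypothetical.
 */
static int __maybe_unused ocfs2_example_init_new_block(struct inode *inode,
						       u64 blkno)
{
	struct buffer_head *bh = sb_getblk(inode->i_sb, blkno);

	if (!bh)
		return -EIO;

	/* Fill in the new metadata before anyone can read it. */
	memset(bh->b_data, 0, bh->b_size);
	ocfs2_set_new_buffer_uptodate(inode, bh);

	brelse(bh);
	return 0;
}
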
/* Requires ip_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
					int index)
{
	sector_t *array = ci->ci_cache.ci_array;
	int bytes;

	BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
	BUG_ON(index >= ci->ci_num_cached);
	BUG_ON(!ci->ci_num_cached);

	mlog(0, "remove index %d (num_cached = %u)\n", index,
	     ci->ci_num_cached);

	ci->ci_num_cached--;

	/* don't need to copy if the array is now empty, or if we
	 * removed at the tail */
	if (ci->ci_num_cached && index < ci->ci_num_cached) {
		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
		memmove(&array[index], &array[index + 1], bytes);
	}
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
				       struct ocfs2_meta_cache_item *item)
{
	mlog(0, "remove block %llu from tree\n",
	     (unsigned long long) item->c_block);

	rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached--;
}

static void ocfs2_remove_block_from_cache(struct inode *inode,
					  sector_t block)
{
	int index;
	struct ocfs2_meta_cache_item *item = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	ocfs2_metadata_cache_lock(ci);
	mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long) block, ci->ci_num_cached,
	     ci->ci_flags & OCFS2_CACHE_FL_INLINE);

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		index = ocfs2_search_cache_array(ci, block);
		if (index != -1)
			ocfs2_remove_metadata_array(ci, index);
	} else {
		item = ocfs2_search_cache_tree(ci, block);
		if (item)
			ocfs2_remove_metadata_tree(ci, item);
	}
	ocfs2_metadata_cache_unlock(ci);

	if (item)
		kmem_cache_free(ocfs2_uptodate_cachep, item);
}

/*
 * Called when we remove a chunk of metadata from an inode. We don't
 * bother reverting things to an inlined array in the case of a remove
 * which moves us back under the limit.
 */
void ocfs2_remove_from_cache(struct inode *inode,
			     struct buffer_head *bh)
{
	sector_t block = bh->b_blocknr;

	ocfs2_remove_block_from_cache(inode, block);
}

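/*
 * Illustrative sketch only: a truncate-style path that frees a
 * metadata block is expected to drop it from the cache before letting
 * go of the buffer_head, roughly like this.  The helper name is
 * hypothetical.
 */
static void __maybe_unused
ocfs2_example_free_metadata_block(struct inode *inode,
				  struct buffer_head *bh)
{
	ocfs2_remove_from_cache(inode, bh);
	brelse(bh);
}
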
/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode,
					    sector_t block,
					    u32 c_len)
{
	unsigned int i, b_len = ocfs2_clusters_to_blocks(inode->i_sb, 1) * c_len;

	for (i = 0; i < b_len; i++, block++)
		ocfs2_remove_block_from_cache(inode, block);
}

int __init init_ocfs2_uptodate_cache(void)
{
	ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
				  sizeof(struct ocfs2_meta_cache_item),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ocfs2_uptodate_cachep)
		return -ENOMEM;

	mlog(0, "%u inlined cache items per inode.\n",
	     OCFS2_CACHE_INFO_MAX_ARRAY);

	return 0;
}

void exit_ocfs2_uptodate_cache(void)
{
	if (ocfs2_uptodate_cachep)
		kmem_cache_destroy(ocfs2_uptodate_cachep);
}
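
/*
 * Illustrative sketch only: init_ocfs2_uptodate_cache() and
 * exit_ocfs2_uptodate_cache() are driven from the filesystem module's
 * init/exit paths (ocfs2_init()/ocfs2_exit() in super.c).  A minimal
 * standalone wiring would look roughly like this; the guard macro is
 * hypothetical and never defined here.
 */
#ifdef OCFS2_UPTODATE_EXAMPLE_MODULE
#include <linux/module.h>

static int __init ocfs2_uptodate_example_init(void)
{
	return init_ocfs2_uptodate_cache();
}

static void __exit ocfs2_uptodate_example_exit(void)
{
	exit_ocfs2_uptodate_cache();
}

module_init(ocfs2_uptodate_example_init);
module_exit(ocfs2_uptodate_example_exit);
#endif	/* OCFS2_UPTODATE_EXAMPLE_MODULE */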