/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "ops_address.h"

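/*
 * Buffer state helpers used throughout this file: a buffer counts as
 * "busy" while it is dirty, locked for I/O, or pinned in the journal;
 * "in io" covers only the dirty and locked states.
 */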
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))

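/*
 * Metadata buffers are mapped directly in getbuf(), so the generic
 * writepage path should never need to look up a block here; the warning
 * below fires if it ever does.
 */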
static int aspace_get_block(struct inode *inode, sector_t lblock,
                            struct buffer_head *bh_result, int create)
{
        gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
        return -EOPNOTSUPP;
}

static int gfs2_aspace_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        return block_write_full_page(page, aspace_get_block, wbc);
}

static const struct address_space_operations aspace_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};

/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode.  Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */

struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
        struct inode *aspace;

        aspace = new_inode(sdp->sd_vfs);
        if (aspace) {
                mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
                aspace->i_mapping->a_ops = &aspace_aops;
                aspace->i_size = ~0ULL;
                aspace->u.generic_ip = NULL;
                insert_inode_hash(aspace);
        }
        return aspace;
}

void gfs2_aspace_put(struct inode *aspace)
{
        remove_inode_hash(aspace);
        iput(aspace);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the part of the AIL
 *
 */

void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;
        int retry;

        BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

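        /*
         * Starting I/O means dropping the log lock, so each time it is
         * retaken the AIL1 list is rescanned from the start.
         */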
        do {
                retry = 0;

                list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                                 bd_ail_st_list) {
                        bh = bd->bd_bh;

                        gfs2_assert(sdp, bd->bd_ail == ai);

                        if (!buffer_busy(bh)) {
                                if (!buffer_uptodate(bh)) {
                                        gfs2_log_unlock(sdp);
                                        gfs2_io_error_bh(sdp, bh);
                                        gfs2_log_lock(sdp);
                                }
                                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
                                continue;
                        }

                        if (!buffer_dirty(bh))
                                continue;

                        list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);

                        retry = 1;
                        break;
                }
        } while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep checking past busy buffers instead of stopping at the first one
 *
 */

int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_ail == ai);

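                /*
                 * DIO_ALL means look at every buffer on the list; otherwise
                 * give up as soon as one is still busy.
                 */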
                if (buffer_busy(bh)) {
                        if (flags & DIO_ALL)
                                continue;
                        else
                                break;
                }

                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);

                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
        }

        return list_empty(&ai->ai_ail1_list);
}

/**
 * gfs2_ail2_empty_one - Remove all the buffers on a trans's AIL2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &ai->ai_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_ail == ai);
                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&bd->bd_gl->gl_ail_count);
                brelse(bd->bd_bh);
        }
}

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        u64 blkno;
        int error;

        blocks = atomic_read(&gl->gl_ail_count);
        if (!blocks)
                return;

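        /*
         * Reserve enough journal space for one revoke per block that is
         * still sitting on this glock's AIL list.
         */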
        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                blkno = bh->b_blocknr;
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));

                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&gl->gl_ail_count);
                brelse(bh);
                gfs2_log_unlock(sdp);

                gfs2_trans_add_revoke(sdp, blkno);

                gfs2_log_lock(sdp);
        }
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        gfs2_log_unlock(sdp);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */

void gfs2_meta_inval(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct address_space *mapping = gl->gl_aspace->i_mapping;

        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

        atomic_inc(&aspace->i_writecount);
        truncate_inode_pages(mapping, 0);
        atomic_dec(&aspace->i_writecount);

        gfs2_assert_withdraw(sdp, !mapping->nrpages);
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        int error;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_sbd);
}

/**
 * getbuf - Get a buffer with a given address space
 * @sdp: the filesystem
 * @aspace: the address space
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
                                  u64 blkno, int create)
{
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

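        /*
         * When creation is allowed, keep retrying until the page cache
         * allocation succeeds; a plain lookup may simply find nothing.
         */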
        if (create) {
                for (;;) {
                        page = grab_cache_page(aspace->i_mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
                page = find_lock_page(aspace->i_mapping, index);
                if (!page)
                        return NULL;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

        /* Locate header for our buffer within our page */
        for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
                /* Do nothing */;
        get_bh(bh);

        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        unlock_page(page);
        mark_page_accessed(page);
        page_cache_release(page);

        return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   struct buffer_head **bhp)
{
        *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
        if (!buffer_uptodate(*bhp))
                ll_rw_block(READ, 1, bhp);
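        /*
         * With DIO_WAIT the result is collected here; otherwise the read
         * simply stays in flight.
         */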
        if (flags & DIO_WAIT) {
                int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
                if (error) {
                        brelse(*bhp);
                        return error;
                }
        }

        return 0;
}

/**
 * gfs2_meta_wait - Wait for a block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                return -EIO;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
                         int meta)
{
        struct gfs2_bufdata *bd;

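        /*
         * For metadata the page lock serializes attaching ->b_private,
         * so racing callers attach the bufdata only once.
         */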
        if (meta)
                lock_page(bh->b_page);

        if (bh->b_private) {
                if (meta)
                        unlock_page(bh->b_page);
                return;
        }

        bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        memset(bd, 0, sizeof(struct gfs2_bufdata));
        bd->bd_bh = bh;
        bd->bd_gl = gl;

        INIT_LIST_HEAD(&bd->bd_list_tr);
        if (meta)
                lops_init_le(&bd->bd_le, &gfs2_buf_lops);
        else
                lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
        bh->b_private = bd;

        if (meta)
                unlock_page(bh->b_page);
}

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 */

void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        wait_on_buffer(bh);

        /* If this buffer is in the AIL and it has already been written
           to in-place disk block, remove it from the AIL. */

        gfs2_log_lock(sdp);
        if (bd->bd_ail && !buffer_in_io(bh))
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        gfs2_log_unlock(sdp);

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer is being placed on
 *
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

        if (!buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

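        /*
         * Put the buffer on the new transaction's AIL1 list.  If it was
         * already on an AIL, drop the extra reference taken when it was
         * pinned; otherwise it also joins the glock's AIL list.
         */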
        gfs2_log_lock(sdp);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        gfs2_log_unlock(sdp);
}

/**
 * gfs2_meta_wipe - make a run of the inode's buffers neither dirty nor pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct inode *aspace = ip->i_gl->gl_aspace;
        struct buffer_head *bh;

        while (blen) {
                bh = getbuf(sdp, aspace, bstart, NO_CREATE);
                if (bh) {
                        struct gfs2_bufdata *bd = bh->b_private;

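                        /*
                         * A buffer that is still pinned is queued in the
                         * journal; pull it off the log's buffer list and
                         * drop that reference.
                         */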
                        if (test_clear_buffer_pinned(bh)) {
                                struct gfs2_trans *tr = current->journal_info;
                                gfs2_log_lock(sdp);
                                list_del_init(&bd->bd_le.le_list);
                                gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
                                sdp->sd_log_num_buf--;
                                gfs2_log_unlock(sdp);
                                tr->tr_num_buf_rm++;
                                brelse(bh);
                        }
                        if (bd) {
                                gfs2_log_lock(sdp);
                                if (bd->bd_ail) {
                                        u64 blkno = bh->b_blocknr;
                                        bd->bd_ail = NULL;
                                        list_del(&bd->bd_ail_st_list);
                                        list_del(&bd->bd_ail_gl_list);
                                        atomic_dec(&bd->bd_gl->gl_ail_count);
                                        brelse(bh);
                                        gfs2_log_unlock(sdp);
                                        gfs2_trans_add_revoke(sdp, blkno);
                                } else
                                        gfs2_log_unlock(sdp);
                        }

                        lock_buffer(bh);
                        clear_buffer_dirty(bh);
                        clear_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}

/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */

void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
        struct buffer_head **bh_slot;
        unsigned int x;

        spin_lock(&ip->i_spin);

        for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
                bh_slot = &ip->i_cache[x];
                if (!*bh_slot)
                        break;
                brelse(*bh_slot);
                *bh_slot = NULL;
        }

        spin_unlock(&ip->i_spin);
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
                              int new, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
        int in_cache = 0;

        spin_lock(&ip->i_spin);
        if (*bh_slot && (*bh_slot)->b_blocknr == num) {
                bh = *bh_slot;
                get_bh(bh);
                in_cache = 1;
        }
        spin_unlock(&ip->i_spin);

        if (!bh)
                bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);

        if (!bh)
                return -ENOBUFS;

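        /*
         * New buffers are only expected for indirect blocks (non-zero
         * height); they get a fresh GFS2_METATYPE_IN header, are cleared
         * past the header, and are added to the current transaction.
         * Existing blocks are read in and their metadata type checked.
         */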
        if (new) {
                if (gfs2_assert_warn(sdp, height))
                        goto err;
                meta_prep_new(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
        } else {
                u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(READ, 1, &bh);
                        if (gfs2_meta_wait(sdp, bh))
                                goto err;
                }
                if (gfs2_metatype_check(sdp, bh, mtype))
                        goto err;
        }

        if (!in_cache) {
                spin_lock(&ip->i_spin);
                if (*bh_slot)
                        brelse(*bh_slot);
                *bh_slot = bh;
                get_bh(bh);
                spin_unlock(&ip->i_spin);
        }

        *bhp = bh;
        return 0;
err:
        brelse(bh);
        return -EIO;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                          sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = getbuf(sdp, aspace, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
                ll_rw_block(READ, 1, &first_bh);

        dblock++;
        extlen--;

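        /*
         * Issue readahead for the rest of the extent, but stop as soon as
         * the read of the first block has completed.
         */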
        while (extlen) {
                bh = getbuf(sdp, aspace, dblock, CREATE);

                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(READA, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;
                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
        for (;;) {
                gfs2_ail1_start(sdp, DIO_ALL);
                if (gfs2_ail1_empty(sdp, DIO_ALL))
                        break;
                msleep(10);
        }
}