 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/writeback.h>
#include "trace_gfs2.h"
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 * Returns: the number of blocks needed (minimum is always 1)
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks = 1;
	unsigned int first, second;

	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}
	return blks;
}
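
/*
 * Worked example for gfs2_struct2blk() (the entry counts are purely
 * illustrative, not taken from any real on-disk layout): if the descriptor
 * block has room for first = 500 entries and each continuation block for
 * second = 508 entries, then nstruct = 1200 entries need
 * blks = 1 + DIV_ROUND_UP(1200 - 500, 508) = 1 + 2 = 3 log descriptor blocks.
 */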
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 * The ail lock _must_ be held when calling this function
void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);

 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @ai: The ail structure
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
		gfs2_assert(sdp, bd->bd_ail == ai);
		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
		if (!buffer_dirty(bh))
		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
		mapping = bh->b_page->mapping;
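		/*
		 * sd_ail_lock is dropped across generic_writepages() below
		 * because starting writeback can sleep.  Once the lock has
		 * been retaken, the AIL list may have changed under us; the
		 * caller in gfs2_ail1_flush() checks this function's return
		 * value so that it can react to such a change.
		 */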
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)

 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
	struct list_head *head = &sdp->sd_ail1_list;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(ai, head, ai_list) {
		if (wbc->nr_to_write <= 0)
		if (gfs2_ail1_start_one(sdp, wbc, ai))
	spin_unlock(&sdp->sd_ail_lock);
	trace_gfs2_ail_flush(sdp, wbc, 0);

 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_end = LLONG_MAX,
	return gfs2_ail1_flush(sdp, &wbc);
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
		gfs2_assert(sdp, bd->bd_ail == ai);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);

 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * Tries to empty the ail1 lists, starting with the oldest first
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
	struct gfs2_ail *ai, *s;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		gfs2_ail1_empty_one(sdp, ai);
		if (list_empty(&ai->ai_ail1_list))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
 * gfs2_ail2_empty_one - Remove all entries from a transaction's AIL2 list
 * @sdp: the filesystem
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
		gfs2_assert(sdp, bd->bd_ail == ai);
		gfs2_remove_from_ail(bd);

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
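		/*
		 * An AIL2 entry is stale once the log tail has moved past the
		 * block where its transaction began (ai_first).  With no wrap
		 * (new_tail >= old_tail) that is the single range
		 * [old_tail, new_tail), i.e. a && b; when the tail wraps past
		 * the end of the journal the freed region is split in two, so
		 * the entry is stale if it falls in either part, i.e. a || b.
		 */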
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));

	spin_unlock(&sdp->sd_ail_lock);
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks is associated with each log
 * flush. The exact number can't be known until flush time, so we ensure
 * that we have just enough free blocks at all times to avoid running out
 * during a log flush.
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
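	/*
	 * Example of the reservation held back above: with a 4096-byte
	 * filesystem block size this is 6 * (4096 / 4096) = 6 blocks kept
	 * free for future log-flush headers; with 1024-byte blocks it is
	 * 6 * (4096 / 1024) = 24 blocks.
	 */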
	unsigned wanted = blks + reserved_blks;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))

	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while (free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);

	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
	trace_gfs2_log_blocks(sdp, -blks);

	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
	struct gfs2_journal_extent *je;
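	/*
	 * The journal need not be physically contiguous: it is described by a
	 * list of extents, each mapping a run of logical journal blocks
	 * (lblock .. lblock + blocks - 1) onto disk blocks starting at dblock.
	 * For an illustrative extent with lblock = 0, blocks = 1024 and
	 * dblock = 5000, logical block 10 maps to disk block 5010.
	 */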
	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
			return je->dblock + lbn - je->lblock;

 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 * Returns: the distance in blocks
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
	dist = newer - older;
		dist += sdp->sd_jdesc->jd_blocks;
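	/*
	 * Worked example (journal size purely illustrative): in a journal of
	 * jd_blocks = 8192, the distance from older = 8000 to newer = 10 is
	 * 10 - 8000, which has wrapped, so adding jd_blocks gives a distance
	 * of 202 blocks in the direction of log growth.
	 */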
 * calc_reserved - Calculate the number of blocks to reserve when
 * refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 * Returns: the number of blocks reserved
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
	unsigned int reserved = 0;
	unsigned int mbuf_limit, metabufhdrs_needed;
	unsigned int dbuf_limit, databufhdrs_needed;
	unsigned int revokes = 0;

	mbuf_limit = buf_limit(sdp);
	metabufhdrs_needed = (sdp->sd_log_commited_buf +
			      (mbuf_limit - 1)) / mbuf_limit;
	dbuf_limit = databuf_limit(sdp);
	databufhdrs_needed = (sdp->sd_log_commited_databuf +
			      (dbuf_limit - 1)) / dbuf_limit;

	if (sdp->sd_log_commited_revoke > 0)
		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,

	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
		   sdp->sd_log_commited_databuf + databufhdrs_needed +
	/* One for the overall header */
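	/*
	 * Illustration of the header math above, assuming the 4K-block
	 * limits quoted in the comment (buf_limit = 502, databuf_limit = 251):
	 * 600 committed metadata buffers need (600 + 501) / 502 = 2 headers,
	 * and 300 committed journaled data buffers need (300 + 250) / 251 = 2
	 * headers, on top of the buffers themselves, any revoke blocks and
	 * the single overall header counted here.
	 */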
static unsigned int current_tail(struct gfs2_sbd *sdp)
	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);

	spin_unlock(&sdp->sd_ail_lock);

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;

 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
	struct gfs2_sbd *sdp = bh->b_private;
	bh->b_private = NULL;

	end_buffer_write_sync(bh, uptodate);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);

 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 * Returns: the buffer_head
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);
	bh->b_end_io = gfs2_log_write_endio;
 * gfs2_fake_write_endio - End of I/O for a fake log buffer
 * @bh: The buffer head
 * @uptodate: The I/O Status
static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
	struct buffer_head *real_bh = bh->b_private;
	struct gfs2_bufdata *bd = real_bh->b_private;
	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

	end_buffer_write_sync(bh, uptodate);
	free_buffer_head(bh);
	unlock_buffer(real_bh);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the in-place buffer head whose data is to be written to the log
 * Returns: the log buffer descriptor
struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;
	bh->b_private = real;
	bh->b_end_io = gfs2_fake_write_endio;
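	/*
	 * The fake buffer head shares the real buffer's page (set_bh_page()
	 * above), so the metadata is written to its journal location without
	 * being copied.  b_private points back at the real buffer head so
	 * that gfs2_fake_write_endio() can unlock it once the log write has
	 * completed.
	 */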
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: nonzero if the log tail is expected to be pulled by this header
static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);

	gfs2_ail1_empty(sdp);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	bh->b_end_io = end_buffer_write_sync;
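	/*
	 * Unless barriers have been disabled (SDF_NOBARRIERS), the header is
	 * submitted with WRITE_FLUSH_FUA: the preceding cache flush makes sure
	 * every journal block submitted so far is on stable storage before the
	 * header, and FUA makes the header itself durable before the I/O
	 * completes.  With barriers turned off a plain WRITE_SYNC is used
	 * instead.
	 */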
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		submit_bh(WRITE_SYNC | REQ_META, bh);
	else
		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);

static void log_flush_commit(struct gfs2_sbd *sdp)
	if (atomic_read(&sdp->sd_log_in_flight)) {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
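	/*
	 * Every outstanding log buffer write (sd_log_in_flight) must have
	 * completed before the commit header below is written; otherwise the
	 * header could claim transactions whose blocks are not yet on disk.
	 */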
	log_write_header(sdp, 0, 0);

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
		list_move(&bd->bd_le.le_list, &written);
		if (!buffer_dirty(bh))
		gfs2_log_unlock(sdp);
		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
			bh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE_SYNC, bh);

	list_splice(&written, &sdp->sd_log_le_ordered);
	gfs2_log_unlock(sdp);

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
		if (buffer_locked(bh)) {
			gfs2_log_unlock(sdp);
		list_del_init(&bd->bd_le.le_list);
	gfs2_log_unlock(sdp);

 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
	trace_gfs2_log_flush(sdp, 1);

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
		       sdp->sd_log_commited_buf);
		gfs2_assert_withdraw(sdp, 0);
	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
		printk(KERN_INFO "GFS2: log databuf %u %u\n",
		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
		gfs2_assert_withdraw(sdp, 0);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp);
	gfs2_ordered_wait(sdp);

	if (sdp->sd_log_head != sdp->sd_log_flush_head)
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		gfs2_log_unlock(sdp);
		log_write_header(sdp, 0, PULL);

	lops_after_commit(sdp, ai);

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_commited_databuf = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
	unsigned int reserved;

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
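	/*
	 * Illustration of the refund above (numbers purely for example): if
	 * earlier transactions had left sd_log_blks_reserved at 20, this
	 * transaction reserved tr_reserved = 10, and calc_reserved() now says
	 * 24 blocks are needed, then unused = 20 - 24 + 10 = 6 blocks are
	 * handed back to sd_log_blks_free and sd_log_blks_reserved becomes 24.
	 */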
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * the journal size.
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
	buf_lo_incore_commit(sdp, tr);

	up_read(&sdp->sd_log_flush_lock);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	     atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
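	/*
	 * Example of the thresholds described above, for an illustrative
	 * 32768-block journal: thresh1 would be roughly 32768 / 3 = 10922
	 * pinned blocks and thresh2 roughly 2 * 32768 / 3 = 21845 used
	 * blocks; crossing either wakes gfs2_logd.
	 */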
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
void gfs2_log_shutdown(struct gfs2_sbd *sdp)
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);

 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
	gfs2_log_flush(sdp, NULL);
	gfs2_ail1_start(sdp);
	if (gfs2_ail1_empty(sdp))

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
int gfs2_logd(void *data)
	struct gfs2_sbd *sdp = data;

	while (!kthread_should_stop()) {
		preflush = atomic_read(&sdp->sd_log_pinned);
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);

		wake_up(&sdp->sd_log_waitq);
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
		if (freezing(current))

			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);