/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
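/*
 * Note: the commit code waits for these temporary buffers with
 * wait_on_buffer(), so all the completion handler above has to do is
 * record the I/O status and unlock the buffer.
 */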
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0]	= cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}
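/*
 * With barriers enabled and a synchronous commit, the commit block above
 * goes out as WRITE_FLUSH_FUA: the preceding cache flush pushes the already
 * completed journal writes out of the device's volatile cache before the
 * commit record, and FUA keeps the commit record itself from lingering there.
 */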
/*
 * This function, along with journal_submit_commit_record(),
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}
/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * currently operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages. Because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
/*
 * Wait for data submitted for writeout, refile inodes to the proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inodes to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}
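/*
 * The CRC32 above is a running checksum: it is accumulated over every block
 * written to the log while JBD2_FEATURE_COMPAT_CHECKSUM is set, and the
 * final value is stored in the commit header by
 * journal_submit_commit_record().
 */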
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
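/*
 * Note (assumption, not stated in the source): splitting the shift as
 * (block >> 31) >> 1 rather than block >> 32 is presumably a defensive
 * idiom so the expression stays well-defined even if the block number
 * type is ever only 32 bits wide.
 */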
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						WRITE_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}
	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);
	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);
	jbd_debug(3, "JBD2: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);
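	/*
	 * The plug above lets the block layer gather the revoke descriptor
	 * writes while they are being queued and push them to the device as
	 * one batch when the plug is released.
	 */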
	jbd_debug(3, "JBD2: commit phase 2\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));
	err = 0;
	descriptor = NULL;
	bufs = 0;
	blk_start_plug(&plug);
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}
		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
						 BJ_LogCtl);
		}
		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */

		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);
		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
							   jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}

		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}
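		/*
		 * On-disk layout reminder: a descriptor block is a
		 * journal_header_t followed by one tag per logged block.
		 * Only the first tag is followed by the 16-byte journal
		 * UUID; every later tag carries JBD2_FLAG_SAME_UUID instead.
		 */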
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
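	/*
	 * The commit record must not go out before the file data it covers,
	 * so wait here for all the data writeback submitted earlier and note
	 * any I/O errors before moving on.
	 */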
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record.
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD2: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);
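		/*
		 * The buffer_head just freed was the temporary BJ_IO copy
		 * created by jbd2_journal_write_metadata_buffer(); the real
		 * metadata buffer is still filed on BJ_Shadow and is dealt
		 * with below now that its log write has completed.
		 */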
		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this IO to
		 * complete. The barrier must be here so that changes by
		 * jbd2_journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);
	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);
	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);
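	/*
	 * Everything that remains is on the forget list: metadata buffers
	 * whose log writes are done (moved there from BJ_Shadow above) plus
	 * buffers the filesystem explicitly forgot.  The loop below decides,
	 * buffer by buffer, whether a checkpoint record is still needed.
	 */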
restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a performance
		 * gain, it also stops aliasing problems if the buffer is
		 * left behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);
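	/*
	 * The average above is a simple exponentially weighted moving
	 * average: each commit contributes 1/4 of its own duration and keeps
	 * 3/4 of the previous value, so one unusually slow commit cannot
	 * swing j_average_commit_time too far.
	 */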
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
			commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * Drop all spin_locks because commit_callback may block.
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet.
	 */
	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);
	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Recheck checkpoint lists after j_list_lock was dropped */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		kfree(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
}