/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

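        /*
         * Take our own reference on the page: once __brelse() below drops
         * the last buffer reference, try_to_free_buffers() may strip the
         * page's buffers, and the page reference keeps the page itself
         * alive until we are done with it.
         */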
        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
{
        struct journal_head *descriptor;
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
        struct timespec now = current_kernel_time();

        *cbh = NULL;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = jbd2_journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        tmp = (struct commit_header *)bh->b_data;
        tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
        tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
        tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
        tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
        tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

        if (JBD2_HAS_COMPAT_FEATURE(journal,
                                    JBD2_FEATURE_COMPAT_CHECKSUM)) {
                tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }

        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = journal_end_buffer_io_sync;

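        /*
         * With barriers enabled and synchronous (non-async-commit) mode,
         * issue the commit block with a preceding cache flush and FUA, so
         * that it reaches stable storage only after everything written
         * before it.
         */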
        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
                ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
        else
                ret = submit_bh(WRITE_SYNC, bh);

        *cbh = bh;
        return ret;
}

/*
 * This function along with journal_submit_commit_record
 * allows writing the commit record asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
                                         struct buffer_head *bh)
{
        int ret = 0;

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);            /* One for getblk() */
        jbd2_journal_put_journal_head(bh2jh(bh));

        return ret;
}

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode =  WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = 0,
                .range_end = i_size_read(mapping->host),
        };

        ret = generic_writepages(mapping, &wbc);
        return ret;
}

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode;
        int err, ret = 0;
        struct address_space *mapping;

        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                mapping = jinode->i_vfs_inode->i_mapping;
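                /*
                 * As noted in the comment above, __JI_COMMIT_RUNNING pins
                 * this jinode so it cannot be released while we drop
                 * j_list_lock to submit its pages.
                 */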
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                /*
                 * Submit the inode data buffers. We use writepage
                 * instead of writepages, because writepages can do
                 * block allocation with delalloc. We need to write
                 * only allocated blocks here.
                 */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                err = journal_submit_inode_data_buffers(mapping);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
        return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transactions if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode, *next_i;
        int err, ret = 0;

        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
                if (err) {
                        /*
                         * Because AS_EIO is cleared by
                         * filemap_fdatawait_range(), set it again so
                         * that the user process can get -EIO from fsync().
                         */
                        set_bit(AS_EIO,
                                &jinode->i_vfs_inode->i_mapping->flags);

                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }

        /* Now refile inodes to the proper lists */
        list_for_each_entry_safe(jinode, next_i,
                                 &commit_transaction->t_inode_list, i_list) {
                list_del(&jinode->i_list);
                if (jinode->i_next_transaction) {
                        jinode->i_transaction = jinode->i_next_transaction;
                        jinode->i_next_transaction = NULL;
                        list_add(&jinode->i_list,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
                }
        }
        spin_unlock(&journal->j_list_lock);

        return ret;
}

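/*
 * Fold one buffer's contents into the running transaction checksum.
 * kmap_atomic() is used because the buffer's backing page may live in
 * highmem and thus have no permanent kernel mapping.
 */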
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        char *addr;
        __u32 checksum;

        addr = kmap_atomic(page, KM_USER0);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
        kunmap_atomic(addr, KM_USER0);

        return checksum;
}

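/*
 * Record a block number in an on-disk block tag.  The low 32 bits always
 * go in t_blocknr; with 64-bit block numbers enabled, the high bits go in
 * t_blocknr_high.  The "(block >> 31) >> 1" idiom presumably avoids a
 * shift by the full type width, which would be undefined if the type were
 * ever only 32 bits wide.
 */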
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                                   unsigned long long block)
{
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
        if (tag_bytes > JBD2_TAG_SIZE32)
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;
        int tag_bytes = journal_tag_bytes(journal);
        struct buffer_head *cbh = NULL; /* For transactional checksums */
        __u32 crc32_sum = ~0;
        struct blk_plug plug;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                mutex_lock(&journal->j_checkpoint_mutex);
                /*
                 * We hold j_checkpoint_mutex so the tail cannot change under
                 * us.  We don't need any special data guarantees for writing
                 * the sb since the journal is empty and it is ok for the
                 * write to be flushed only with the transaction commit.
                 */
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
                                                journal->j_tail,
                                                WRITE_SYNC);
                mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD2: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
        stats.run.rs_locked = jiffies;
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);

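        /*
         * Wait for all outstanding handles to finish.  This is the usual
         * prepare_to_wait() pattern: queue ourselves on j_wait_updates
         * first, then recheck t_updates, so a wakeup arriving between the
         * check and the sleep cannot be missed.
         */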
        spin_lock(&commit_transaction->t_handle_lock);
        while (atomic_read(&commit_transaction->t_updates)) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (atomic_read(&commit_transaction->t_updates)) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        write_unlock(&journal->j_state_lock);
                        schedule();
                        write_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                jbd2_journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_clean_checkpoint_list(journal, false);
        spin_unlock(&journal->j_list_lock);

        jbd_debug(3, "JBD2: commit phase 1\n");

        /*
         * Switch to a new revoke table.
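         * Revokes recorded from now on go to the new table and belong to
         * the next transaction; the table we swap out still holds this
         * transaction's revokes and is written into the log below.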
         */
        jbd2_journal_switch_revoke_table(journal);

        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
                                             stats.run.rs_flushing);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        write_unlock(&journal->j_state_lock);

        jbd_debug(3, "JBD2: commit phase 2a\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);
        if (err)
                jbd2_journal_abort(journal, err);

        blk_start_plug(&plug);
        jbd2_journal_write_revoke_records(journal, commit_transaction,
                                          WRITE_SYNC);
        blk_finish_plug(&plug);

        jbd_debug(3, "JBD2: commit phase 2b\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        write_unlock(&journal->j_state_lock);

        trace_jbd2_commit_logging(journal, commit_transaction);
        stats.run.rs_logging = jiffies;
        stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
                                               stats.run.rs_logging);
        stats.run.rs_blocks =
                atomic_read(&commit_transaction->t_outstanding_credits);
        stats.run.rs_blocks_logged = 0;

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 atomic_read(&commit_transaction->t_outstanding_credits));

        err = 0;
        descriptor = NULL;
        bufs = 0;
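        /*
         * Plug the block layer while we batch up journal writes: the
         * submit_bh() calls below only queue the IO, which gets merged
         * and dispatched to the driver when the plug is released (or
         * when the task sleeps).
         */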
        blk_start_plug(&plug);
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        jbd2_buffer_abort_trigger(jh,
                                                  jh->b_frozen_data ?
                                                  jh->b_frozen_triggers :
                                                  jh->b_triggers);
                        jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD2: get descriptor\n");

                        descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                jbd2_journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        jbd2_journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        jbd2_journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by jbd2_journal_next_log_block() also.
                 */
                atomic_dec(&commit_transaction->t_outstanding_credits);

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: jbd2_journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                if (flags < 0) {
                        jbd2_journal_abort(journal, flags);
                        continue;
                }
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JBD2_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += tag_bytes;
                space_left -= tag_bytes;

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

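                /*
                 * The "space_left < tag_bytes + 16" check is conservative:
                 * it keeps room for one more tag plus 16 bytes of UUID,
                 * the worst case for the tag that would follow.
                 */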
                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < tag_bytes + 16) {

                        jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                /*
                                 * Compute checksum.
                                 */
                                if (JBD2_HAS_COMPAT_FEATURE(journal,
                                        JBD2_FEATURE_COMPAT_CHECKSUM)) {
                                        crc32_sum =
                                            jbd2_checksum_data(crc32_sum, bh);
                                }

                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(WRITE_SYNC, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
                        "JBD2: Detected IO errors while flushing file data "
                       "on %s\n", journal->j_devname);
                if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
                        jbd2_journal_abort(journal, err);
                err = 0;
        }

        write_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
        commit_transaction->t_state = T_COMMIT_DFLUSH;
        write_unlock(&journal->j_state_lock);
        /*
         * If the journal is not located on the file system device,
         * then we must flush the file system device before we issue
         * the commit record
         */
        if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
                blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

        /* Done it all: now write the commit record asynchronously. */
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                 &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }

        blk_finish_plug(&plug);

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD2: commit phase 3\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                jbd2_journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /*
                 * Wake up any transactions which were waiting for this IO to
                 * complete. The barrier must be here so that changes by
                 * jbd2_journal_file_buffer() take effect before wake_up_bit()
                 * does the waitqueue check.
                 */
                smp_mb();
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD2: commit phase 4\n");

        /* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                jbd2_journal_unfile_buffer(journal, jh);
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                jbd2_journal_abort(journal, err);

        jbd_debug(3, "JBD2: commit phase 5\n");
        write_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
        commit_transaction->t_state = T_COMMIT_JFLUSH;
        write_unlock(&journal->j_state_lock);

        if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
        if (cbh)
                err = journal_wait_on_commit_record(journal, cbh);
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
            journal->j_flags & JBD2_BARRIER) {
                blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
        }

        if (err)
                jbd2_journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list they were
           on before. */

        jbd_debug(3, "JBD2: commit phase 6\n");

        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
                int try_to_free = 0;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                /*
                 * Get a reference so that bh cannot be freed before we are
                 * done with it.
                 */
                get_bh(bh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 *
                 * We also know that the frozen data has already fired
                 * its triggers if they exist, so we can clear that too.
                 */
                if (jh->b_committed_data) {
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                                jh->b_frozen_triggers = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                        jh->b_frozen_triggers = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future after the "add to orphan"
                 * operation has been committed.  That's not only a performance
                 * gain, it also stops aliasing problems if the buffer is
                 * left behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh) && !jh->b_next_transaction) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /*
                         * A buffer on the BJ_Forget list that is not jbddirty
                         * was freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on the
                         * BJ_Forget list.
                         */
                        if (!jh->b_next_transaction)
                                try_to_free = 1;
                }
                JBUFFER_TRACE(jh, "refile or unfile buffer");
                __jbd2_journal_refile_buffer(jh);
                jbd_unlock_bh_state(bh);
                if (try_to_free)
                        release_buffer_page(bh);        /* Drops bh reference */
                else
                        __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __jbd2_journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        write_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                write_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD2: commit phase 7\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
                                              commit_transaction->t_start);

        /*
         * File the transaction statistics
         */
        stats.ts_tid = commit_transaction->t_tid;
        stats.run.rs_handle_count =
                atomic_read(&commit_transaction->t_handle_count);
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
                             commit_transaction->t_tid, &stats.run);

        /*
         * Calculate overall stats
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
        journal->j_stats.run.rs_logging += stats.run.rs_logging;
        journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
        journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
        journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);

        commit_transaction->t_state = T_COMMIT_CALLBACK;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * Weight the existing average higher than the new commit time so
         * we don't react too strongly to vast changes in the commit time
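         * (an exponential moving average: avg = (commit_time + 3 * avg) / 4)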
         */
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time +
                                journal->j_average_commit_time*3) / 4;
        else
                journal->j_average_commit_time = commit_time;

        write_unlock(&journal->j_state_lock);

        if (journal->j_checkpoint_transactions == NULL) {
                journal->j_checkpoint_transactions = commit_transaction;
                commit_transaction->t_cpnext = commit_transaction;
                commit_transaction->t_cpprev = commit_transaction;
        } else {
                commit_transaction->t_cpnext =
                        journal->j_checkpoint_transactions;
                commit_transaction->t_cpprev =
                        commit_transaction->t_cpnext->t_cpprev;
                commit_transaction->t_cpnext->t_cpprev =
                        commit_transaction;
                commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
        }
        spin_unlock(&journal->j_list_lock);
        /* Drop all spin_locks because commit_callback may block.
         * __jbd2_journal_remove_checkpoint() cannot destroy the transaction
         * under us because it is not marked as T_FINISHED yet */
        if (journal->j_commit_callback)
                journal->j_commit_callback(journal, commit_transaction);

        trace_jbd2_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD2: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        write_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        commit_transaction->t_state = T_FINISHED;
        /* Recheck checkpoint lists after j_list_lock was dropped */
        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
                kfree(commit_transaction);
        }
        spin_unlock(&journal->j_list_lock);
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_done_commit);
}