/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}
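
/*
 * Temporary buffers written with this end_io follow a plain submit/wait
 * pairing: the commit code below locks each buffer, points b_end_io at
 * this handler, calls submit_bh(), and later waits for the buffer lock
 * to clear (see the start_journal_io loop and the wait_for_iobuf loop
 * in journal_commit_transaction()).
 */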

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Decrement reference counter for data buffer. If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
        if (buffer_freed(bh)) {
                clear_buffer_freed(bh);
                release_buffer_page(bh);
        } else
                put_bh(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
        if (!jbd_trylock_bh_state(bh)) {
                spin_unlock(&journal->j_list_lock);
                schedule();
                return 0;
        }
        return 1;
}
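
#if 0
/*
 * A minimal sketch of the retry idiom callers build around
 * inverted_lock(): when the trylock fails, j_list_lock has already been
 * dropped, so it is safe to block on the bh_state lock, but j_list_lock
 * must then be re-taken and the buffer's state re-validated before the
 * list is touched again (it may have changed while no locks were held).
 * do_one_jh() is a hypothetical stand-in for the per-buffer work; the
 * real list walks in this file follow the same shape.
 */
static void example_list_walk(journal_t *journal, transaction_t *transaction)
{
        struct journal_head *jh;
        struct buffer_head *bh;

        spin_lock(&journal->j_list_lock);
        while (transaction->t_sync_datalist) {
                jh = transaction->t_sync_datalist;
                bh = jh2bh(jh);
                if (!inverted_lock(journal, bh)) {
                        /* Lost the race: block, then re-take j_list_lock */
                        jbd_lock_bh_state(bh);
                        spin_lock(&journal->j_list_lock);
                }
                /* ...re-validate jh here, then do the real work... */
                do_one_jh(jh);                  /* hypothetical helper */
                jbd_unlock_bh_state(bh);
        }
        spin_unlock(&journal->j_list_lock);
}
#endif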

/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction)
{
        struct journal_head *descriptor;
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;
        int barrier_done = 0;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        header = (journal_header_t *)(bh->b_data);
        header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
        header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);
        if (journal->j_flags & JFS_BARRIER) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
        ret = sync_dirty_buffer(bh);
        if (barrier_done)
                clear_buffer_ordered(bh);
        /* is it possible for another commit to fail at roughly
         * the same time as this one?  If so, we don't want to
         * trust the barrier flag in the super, but instead want
         * to remember if we sent a barrier request
         */
        if (ret == -EOPNOTSUPP && barrier_done) {
                char b[BDEVNAME_SIZE];

                printk(KERN_WARNING
                        "JBD: barrier-based sync failed on %s - "
                        "disabling barriers\n",
                        bdevname(journal->j_dev, b));
                spin_lock(&journal->j_state_lock);
                journal->j_flags &= ~JFS_BARRIER;
                spin_unlock(&journal->j_state_lock);

                /* And try again, without the barrier */
                set_buffer_uptodate(bh);
                set_buffer_dirty(bh);
                ret = sync_dirty_buffer(bh);
        }
        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);

        return (ret == -EIO);
}
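
/*
 * Note on the on-disk commit record (derived from the fields set above):
 * it is a bare journal_header_t with h_blocktype == JFS_COMMIT_BLOCK and
 * h_sequence equal to the committing transaction's tid.  Recovery only
 * replays a transaction if a commit block carrying its sequence number
 * is found in the log, which is why the barrier above tries to keep
 * this block from reaching disk ahead of the transaction's other log
 * blocks.
 */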

static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
{
        int i;

        for (i = 0; i < bufs; i++) {
                wbuf[i]->b_end_io = end_buffer_write_sync;
                /* We use up our safety reference in submit_bh() */
                submit_bh(WRITE, wbuf[i]);
        }
}

/*
 * Submit all the data buffers to disk
 */
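/*
 * Each buffer on t_sync_datalist ends up in one of three states below:
 * dirty buffers are queued in wbuf[] for submission and filed on
 * BJ_Locked so the caller can wait for them; buffers already under IO
 * are simply refiled to BJ_Locked; and buffers whose writeout has
 * completed are unfiled and released.
 */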
static int journal_submit_data_buffers(journal_t *journal,
                                transaction_t *commit_transaction)
{
        struct journal_head *jh;
        struct buffer_head *bh;
        int locked;
        int bufs = 0;
        struct buffer_head **wbuf = journal->j_wbuf;
        int err = 0;

        /*
         * Whenever we unlock the journal and sleep, things can get added
         * onto ->t_sync_datalist, so we have to keep looping back to
         * write_out_data until we *know* that the list is empty.
         *
         * Clean up any flushed data buffers from the data list.  Even in
         * abort mode, we want to flush this out as soon as possible.
         */
write_out_data:
        cond_resched();
        spin_lock(&journal->j_list_lock);

        while (commit_transaction->t_sync_datalist) {
                jh = commit_transaction->t_sync_datalist;
                bh = jh2bh(jh);
                locked = 0;

                /* Get reference just to make sure buffer does not disappear
                 * when we are forced to drop various locks */
                get_bh(bh);
                /* If the buffer is dirty, we need to submit IO and hence
                 * we need the buffer lock. We try to lock the buffer without
                 * blocking. If we fail, we need to drop j_list_lock and do
                 * blocking lock_buffer().
                 */
                if (buffer_dirty(bh)) {
                        if (!trylock_buffer(bh)) {
                                BUFFER_TRACE(bh, "needs blocking lock");
                                spin_unlock(&journal->j_list_lock);
                                /* Write out all data to prevent deadlocks */
                                journal_do_submit_data(wbuf, bufs);
                                bufs = 0;
                                lock_buffer(bh);
                                spin_lock(&journal->j_list_lock);
                        }
                        locked = 1;
                }
                /* We have to get the bh_state lock. Again out of order, sigh. */
                if (!inverted_lock(journal, bh)) {
                        jbd_lock_bh_state(bh);
                        spin_lock(&journal->j_list_lock);
                }
                /* Someone already cleaned up the buffer? */
                if (!buffer_jbd(bh)
                        || jh->b_transaction != commit_transaction
                        || jh->b_jlist != BJ_SyncData) {
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        BUFFER_TRACE(bh, "already cleaned up");
                        release_data_buffer(bh);
                        continue;
                }
                if (locked && test_clear_buffer_dirty(bh)) {
                        BUFFER_TRACE(bh, "needs writeout, adding to array");
                        wbuf[bufs++] = bh;
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (bufs == journal->j_wbufsize) {
                                spin_unlock(&journal->j_list_lock);
                                journal_do_submit_data(wbuf, bufs);
                                bufs = 0;
                                goto write_out_data;
                        }
                } else if (!locked && buffer_locked(bh)) {
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        put_bh(bh);
                } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        if (unlikely(!buffer_uptodate(bh)))
                                err = -EIO;
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        journal_remove_journal_head(bh);
                        /* One for our safety reference, the other for
                         * journal_remove_journal_head() */
                        put_bh(bh);
                        release_data_buffer(bh);
                }

                if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
        }
        spin_unlock(&journal->j_list_lock);
        journal_do_submit_data(wbuf, bufs);

        return err;
}

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */
#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif

        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT (commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A journal_get_undo_access()+journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug (3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        journal_switch_revoke_table(journal);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug (3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);

        /*
         * Wait for all previously submitted IO to complete.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_locked_list) {
                struct buffer_head *bh;

                jh = commit_transaction->t_locked_list->b_tprev;
                bh = jh2bh(jh);
                get_bh(bh);
                if (buffer_locked(bh)) {
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        spin_lock(&journal->j_list_lock);
                }
                if (unlikely(!buffer_uptodate(bh))) {
                        if (!trylock_page(bh->b_page)) {
                                spin_unlock(&journal->j_list_lock);
                                lock_page(bh->b_page);
                                spin_lock(&journal->j_list_lock);
                        }
                        if (bh->b_page->mapping)
                                set_bit(AS_EIO, &bh->b_page->mapping->flags);

                        unlock_page(bh->b_page);
                        SetPageError(bh->b_page);
                        err = -EIO;
                }
                if (!inverted_lock(journal, bh)) {
                        put_bh(bh);
                        spin_lock(&journal->j_list_lock);
                        continue;
                }
                if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        journal_remove_journal_head(bh);
                        put_bh(bh);
                } else {
                        jbd_unlock_bh_state(bh);
                }
                release_data_buffer(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);

        if (err) {
                char b[BDEVNAME_SIZE];

                printk(KERN_WARNING
                        "JBD: Detected IO errors while flushing file data "
                        "on %s\n", bdevname(journal->j_fs_dev, b));
                if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
                        journal_abort(journal, err);
                err = 0;
        }

        journal_write_revoke_records(journal, commit_transaction);

        /*
         * If we found any dirty or locked buffers, then we should have
         * looped back up to the write_out_data label.  If there weren't
         * any then journal_clean_data_list should have wiped the list
         * clean by now, so check that it is in fact empty.
         */
        J_ASSERT (commit_transaction->t_sync_datalist == NULL);

        jbd_debug (3, "JBD: commit phase 3\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        spin_unlock(&journal->j_state_lock);

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);

        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JFS_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JFS_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += sizeof(journal_block_tag_t);
                space_left -= sizeof(journal_block_tag_t);

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }
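
                /*
                 * At this point the descriptor block being filled looks
                 * like this on disk (the layout follows directly from
                 * the code above and the LAST_TAG flag set below):
                 *
                 *      +--------------------------+
                 *      | journal_header_t         |  DESCRIPTOR_BLOCK, tid
                 *      +--------------------------+
                 *      | journal_block_tag_t      |  first tag
                 *      | 16-byte journal UUID     |  follows first tag only
                 *      | journal_block_tag_t      |  SAME_UUID
                 *      | ...                      |
                 *      | journal_block_tag_t      |  SAME_UUID | LAST_TAG
                 *      +--------------------------+
                 */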

                /* If there's no more to do, or if the descriptor is full
                   (no room left for another tag plus a 16-byte UUID), let
                   the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < sizeof(journal_block_tag_t) + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(WRITE, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 4\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
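/*
 * Each metadata buffer journaled above left two things behind: a
 * temporary buffer_head on t_iobuf_list (the copy actually written to
 * the log) and the original buffer parked on t_shadow_list.  The loop
 * below reaps them in lockstep: the temporary bh is freed outright,
 * while its shadow is moved to BJ_Forget for checkpointing and any
 * waiters on BH_Unshadow are woken.
 */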
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 5\n");

        /* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                journal_unfile_buffer(journal, jh);
                journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 6\n");

        if (journal_write_commit_record(journal, commit_transaction))
                err = -EIO;

        if (err)
                journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint lists they
           were on before. */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        __journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future now that the last use has
                 * been committed.  That's not only a performance gain,
                 * it also stops aliasing problems if the buffer is left
                 * behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh)) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /* The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                        __journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                /* needs a brelse */
                                journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
                }
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 8\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * Weight the commit time higher than the average time so we don't
         * react too strongly to vast changes in commit time: the new
         * average is (3 * commit_time + old_average) / 4.  For example,
         * an 8ms average and a 4ms commit yield (3*4 + 8) / 4 = 5ms.
         */
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time*3 +
                                journal->j_average_commit_time) / 4;
        else
                journal->j_average_commit_time = commit_time;

        spin_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
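                        /*
                         * Splice the transaction into the circular
                         * checkpoint list just before the current head,
                         * i.e. at the tail, so transactions stay in
                         * commit order.
                         */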
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        wake_up(&journal->j_wait_done_commit);
}