/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Defines the functions of the journalling API.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>

#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>
#include "blockcheck.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "localalloc.h"
#include "buffer_head_io.h"
DEFINE_SPINLOCK(trans_inc_lock);

static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot);
static int ocfs2_commit_thread(void *arg);

static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 0);
}

static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 1);
}
/*
 * The recovery_list is a simple linked list of node numbers to recover.
 * It is protected by the recovery_lock.
 */

struct ocfs2_recovery_map {
        unsigned int rm_used;
        unsigned int *rm_entries;
};

int ocfs2_recovery_init(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        mutex_init(&osb->recovery_lock);
        osb->disable_recovery = 0;
        osb->recovery_thread_task = NULL;
        init_waitqueue_head(&osb->recovery_event);

        rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
                     osb->max_slots * sizeof(unsigned int),
                     GFP_KERNEL);
        if (!rm) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        rm->rm_entries = (unsigned int *)((char *)rm +
                                          sizeof(struct ocfs2_recovery_map));
        osb->recovery_map = rm;

        return 0;
}
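
/*
 * Illustrative note (not part of the original file): the map and its
 * entry array live in a single allocation, so one kfree() in
 * ocfs2_recovery_exit() releases both.  A minimal sketch of the layout,
 * assuming a hypothetical max_slots of 4:
 *
 *	rm = kzalloc(sizeof(*rm) + 4 * sizeof(unsigned int), GFP_KERNEL);
 *	rm->rm_entries = (unsigned int *)(rm + 1);
 *
 *	memory: [ struct ocfs2_recovery_map | rm_entries[0..3] ]
 */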
/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
        mb();
        return osb->recovery_thread_task != NULL;
}

void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        /* disable any new recovery threads and wait for any currently
         * running ones to exit. Do this before setting the vol_state. */
        mutex_lock(&osb->recovery_lock);
        osb->disable_recovery = 1;
        mutex_unlock(&osb->recovery_lock);
        wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
        flush_workqueue(ocfs2_wq);

        /*
         * Now that recovery is shut down, and the osb is about to be
         * freed, the osb_lock is not taken here.
         */
        rm = osb->recovery_map;
        /* XXX: Should we bug if there are dirty entries? */

        kfree(rm);
}
static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        assert_spin_locked(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        return 1;
        }

        return 0;
}

/* Behaves like test-and-set.  Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
                                  unsigned int node_num)
{
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        if (__ocfs2_recovery_map_test(osb, node_num)) {
                spin_unlock(&osb->osb_lock);
                return 1;
        }

        /* XXX: Can this be exploited? Not from o2dlm... */
        BUG_ON(rm->rm_used >= osb->max_slots);

        rm->rm_entries[rm->rm_used] = node_num;
        rm->rm_used++;
        spin_unlock(&osb->osb_lock);

        return 0;
}

static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        break;
        }

        if (i < rm->rm_used) {
                /* XXX: be careful with the pointer math */
                memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
                        (rm->rm_used - i - 1) * sizeof(unsigned int));
                rm->rm_used--;
        }

        spin_unlock(&osb->osb_lock);
}
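
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * treats ocfs2_recovery_map_set() as a test-and-set, e.g.:
 *
 *	if (ocfs2_recovery_map_set(osb, node_num))
 *		mlog(0, "node %d already in recovery map.\n", node_num);
 *	...recover the node...
 *	ocfs2_recovery_map_clear(osb, node_num);  (only once recovery succeeds)
 */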
static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
        int status = 0;
        unsigned int flushed;
        unsigned long old_id;
        struct ocfs2_journal *journal = NULL;

        journal = osb->journal;

        /* Flush all pending commits and checkpoint the journal. */
        down_write(&journal->j_trans_barrier);

        if (atomic_read(&journal->j_num_trans) == 0) {
                up_write(&journal->j_trans_barrier);
                mlog(0, "No transactions for me to flush!\n");
                goto finally;
        }

        jbd2_journal_lock_updates(journal->j_journal);
        status = jbd2_journal_flush(journal->j_journal);
        jbd2_journal_unlock_updates(journal->j_journal);
        if (status < 0) {
                up_write(&journal->j_trans_barrier);
                mlog_errno(status);
                goto finally;
        }

        old_id = ocfs2_inc_trans_id(journal);

        flushed = atomic_read(&journal->j_num_trans);
        atomic_set(&journal->j_num_trans, 0);
        up_write(&journal->j_trans_barrier);

        mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
             journal->j_trans_id, flushed);

        ocfs2_wake_downconvert_thread(osb);
        wake_up(&journal->j_checkpointed);
finally:
        return status;
}
/* pass it NULL and it will allocate a new handle object for you.  If
 * you pass it a handle however, it may still return error, in which
 * case it has free'd the passed handle for you. */
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
        journal_t *journal = osb->journal->j_journal;
        handle_t *handle;

        BUG_ON(!osb || !osb->journal->j_journal);

        if (ocfs2_is_hard_readonly(osb))
                return ERR_PTR(-EROFS);

        BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
        BUG_ON(max_buffs <= 0);

        /* Nested transaction? Just return the handle... */
        if (journal_current_handle())
                return jbd2_journal_start(journal, max_buffs);

        down_read(&osb->journal->j_trans_barrier);

        handle = jbd2_journal_start(journal, max_buffs);
        if (IS_ERR(handle)) {
                up_read(&osb->journal->j_trans_barrier);

                mlog_errno(PTR_ERR(handle));

                if (is_journal_aborted(journal)) {
                        ocfs2_abort(osb->sb, "Detected aborted journal");
                        handle = ERR_PTR(-EROFS);
                }
        } else {
                if (!ocfs2_mount_local(osb))
                        atomic_inc(&(osb->journal->j_num_trans));
        }

        return handle;
}
int ocfs2_commit_trans(struct ocfs2_super *osb,
                       handle_t *handle)
{
        int ret, nested;
        struct ocfs2_journal *journal = osb->journal;

        BUG_ON(!handle);

        nested = handle->h_ref > 1;
        ret = jbd2_journal_stop(handle);
        if (ret < 0)
                mlog_errno(ret);

        if (!nested)
                up_read(&journal->j_trans_barrier);

        return ret;
}
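
/*
 * Illustrative sketch (not part of the original file): the usual shape
 * of a caller, with error handling abbreviated and "credits" standing
 * in for whatever credit count the update actually needs:
 *
 *	handle_t *handle;
 *
 *	handle = ocfs2_start_trans(osb, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...journal_access / modify / journal_dirty...
 *	ocfs2_commit_trans(osb, handle);
 */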
/*
 * 'nblocks' is what you want to add to the current transaction.
 * extend_trans will either extend the current handle by nblocks, or
 * commit it and start a new one with nblocks credits.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
        int status;

        mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
        status = 1;
#else
        status = jbd2_journal_extend(handle, nblocks);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
#endif

        if (status > 0) {
                mlog(0,
                     "jbd2_journal_extend failed, trying "
                     "jbd2_journal_restart\n");
                status = jbd2_journal_restart(handle, nblocks);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}
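
/*
 * Illustrative sketch (not part of the original file): per the warning
 * above, a caller dirties its buffers before extending and re-runs
 * journal_access afterwards, roughly:
 *
 *	ocfs2_journal_dirty(handle, bh);
 *	status = ocfs2_extend_trans(handle, extra_credits);
 *	if (!status)
 *		status = ocfs2_journal_access_di(handle, inode, bh,
 *						 OCFS2_JOURNAL_ACCESS_WRITE);
 *
 * "extra_credits" is a hypothetical name for the additional block count
 * the caller wants.
 */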
struct ocfs2_triggers {
        struct jbd2_buffer_trigger_type ot_triggers;
        int                             ot_offset;
};

static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}

static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
{
        struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, data + ot->ot_offset);
}

/*
 * Quota blocks have their own trigger because the struct ocfs2_block_check
 * offset depends on the blocksize.
 */
static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
                                    struct buffer_head *bh,
                                    void *data, size_t size)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(size, data);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, &dqt->dq_check);
}

static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
                                struct buffer_head *bh)
{
        mlog(ML_ERROR,
             "ocfs2_abort_trigger called by JBD2.  bh = 0x%lx, "
             "bh->b_blocknr = %llu\n",
             (unsigned long)bh,
             (unsigned long long)bh->b_blocknr);

        /* We aren't guaranteed to have the superblock here - but if we
         * don't, it'll just crash. */
        ocfs2_error(bh->b_assoc_map->host->i_sb,
                    "JBD2 has aborted our journal, ocfs2 cannot continue\n");
}

static struct ocfs2_triggers di_triggers = {
        .ot_triggers = {
                .t_commit = ocfs2_commit_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset = offsetof(struct ocfs2_dinode, i_check),
};

static struct ocfs2_triggers eb_triggers = {
        .ot_triggers = {
                .t_commit = ocfs2_commit_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
};

static struct ocfs2_triggers gd_triggers = {
        .ot_triggers = {
                .t_commit = ocfs2_commit_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
};

static struct ocfs2_triggers xb_triggers = {
        .ot_triggers = {
                .t_commit = ocfs2_commit_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
};

static struct ocfs2_triggers dq_triggers = {
        .ot_triggers = {
                .t_commit = ocfs2_dq_commit_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
};
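
/*
 * Illustrative note (not part of the original file): supporting ecc on
 * a new metadata block type only requires another static instance.
 * For a hypothetical struct ocfs2_foo_block carrying an fb_check field:
 *
 *	static struct ocfs2_triggers foo_triggers = {
 *		.ot_triggers = {
 *			.t_commit = ocfs2_commit_trigger,
 *			.t_abort = ocfs2_abort_trigger,
 *		},
 *		.ot_offset = offsetof(struct ocfs2_foo_block, fb_check),
 *	};
 */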
static int __ocfs2_journal_access(handle_t *handle,
                                  struct inode *inode,
                                  struct buffer_head *bh,
                                  struct ocfs2_triggers *triggers,
                                  int type)
{
        int status;

        BUG_ON(!inode);
        BUG_ON(!handle);
        BUG_ON(!bh);

        mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
                   (unsigned long long)bh->b_blocknr, type,
                   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
                   "OCFS2_JOURNAL_ACCESS_CREATE" :
                   "OCFS2_JOURNAL_ACCESS_WRITE",
                   bh->b_size);

        /* we can safely remove this assertion after testing. */
        if (!buffer_uptodate(bh)) {
                mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
                mlog(ML_ERROR, "b_blocknr=%llu\n",
                     (unsigned long long)bh->b_blocknr);
                BUG();
        }

        /* Set the current transaction information on the inode so
         * that the locking code knows whether it can drop its locks
         * on this inode or not. We're protected from the commit
         * thread updating the current transaction id until
         * ocfs2_commit_trans() because ocfs2_start_trans() took
         * j_trans_barrier for us. */
        ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);

        mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
        switch (type) {
        case OCFS2_JOURNAL_ACCESS_CREATE:
        case OCFS2_JOURNAL_ACCESS_WRITE:
                status = jbd2_journal_get_write_access(handle, bh);
                break;

        case OCFS2_JOURNAL_ACCESS_UNDO:
                status = jbd2_journal_get_undo_access(handle, bh);
                break;

        default:
                status = -EINVAL;
                mlog(ML_ERROR, "Unknown access type!\n");
        }
        if (!status && ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)) && triggers)
                jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
        mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

        if (status < 0)
                mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
                     status, type);

        return status;
}
int ocfs2_journal_access_di(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, &di_triggers,
                                      type);
}

int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, &eb_triggers,
                                      type);
}

int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, &gd_triggers,
                                      type);
}

int ocfs2_journal_access_db(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        /* Right now, nothing for dirblocks */
        return __ocfs2_journal_access(handle, inode, bh, NULL, type);
}

int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, &xb_triggers,
                                      type);
}

int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, &dq_triggers,
                                      type);
}

int ocfs2_journal_access(handle_t *handle, struct inode *inode,
                         struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, inode, bh, NULL, type);
}

int ocfs2_journal_dirty(handle_t *handle,
                        struct buffer_head *bh)
{
        int status;

        mlog_entry("(bh->b_blocknr=%llu)\n",
                   (unsigned long long)bh->b_blocknr);

        status = jbd2_journal_dirty_metadata(handle, bh);
        if (status < 0)
                mlog(ML_ERROR, "Could not dirty metadata buffer. "
                     "(bh->b_blocknr=%llu)\n",
                     (unsigned long long)bh->b_blocknr);

        return status;
}
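
/*
 * Illustrative sketch (not part of the original file): the canonical
 * access/modify/dirty sequence for a dinode block inside a transaction,
 * error handling elided:
 *
 *	status = ocfs2_journal_access_di(handle, inode, di_bh,
 *					 OCFS2_JOURNAL_ACCESS_WRITE);
 *	...modify the struct ocfs2_dinode at di_bh->b_data...
 *	status = ocfs2_journal_dirty(handle, di_bh);
 */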
#define OCFS2_DEFAULT_COMMIT_INTERVAL   (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)

void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
        journal_t *journal = osb->journal->j_journal;
        unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;

        if (osb->osb_commit_interval)
                commit_interval = osb->osb_commit_interval;

        spin_lock(&journal->j_state_lock);
        journal->j_commit_interval = commit_interval;
        if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
                journal->j_flags |= JBD2_BARRIER;
        else
                journal->j_flags &= ~JBD2_BARRIER;
        spin_unlock(&journal->j_state_lock);
}
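
/*
 * Illustrative note (not part of the original file): commit_interval is
 * in jiffies, so a mount-time value of, say, five seconds would be
 * stored as:
 *
 *	osb->osb_commit_interval = 5 * HZ;
 */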
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
        int status = -1;
        struct inode *inode = NULL; /* the journal inode */
        journal_t *j_journal = NULL;
        struct ocfs2_dinode *di = NULL;
        struct buffer_head *bh = NULL;
        struct ocfs2_super *osb;
        int inode_lock = 0;

        BUG_ON(!journal);

        osb = journal->j_osb;

        /* already have the inode for our journal */
        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            osb->slot_num);
        if (inode == NULL) {
                status = -EACCES;
                goto done;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto done;
        }

        SET_INODE_JOURNAL(inode);
        OCFS2_I(inode)->ip_open_count++;

        /* Skip recovery waits here - journal inode metadata never
         * changes in a live cluster so it can be considered an
         * exception to the rule. */
        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not get lock on journal!\n");
                goto done;
        }

        inode_lock = 1;
        di = (struct ocfs2_dinode *)bh->b_data;

        if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
                mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
                     inode->i_size);
                status = -EINVAL;
                goto done;
        }

        mlog(0, "inode->i_size = %lld\n", inode->i_size);
        mlog(0, "inode->i_blocks = %llu\n",
             (unsigned long long)inode->i_blocks);
        mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);

        /* call the kernel's journal init function now */
        j_journal = jbd2_journal_init_inode(inode);
        if (j_journal == NULL) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = -EINVAL;
                goto done;
        }

        mlog(0, "Returned from jbd2_journal_init_inode\n");
        mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);

        *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
                  OCFS2_JOURNAL_DIRTY_FL);

        journal->j_journal = j_journal;
        journal->j_inode = inode;
        journal->j_bh = bh;

        ocfs2_set_journal_params(osb);

        journal->j_state = OCFS2_JOURNAL_LOADED;

        status = 0;
done:
        if (status < 0) { /* Error! Clean up */
                if (inode_lock)
                        ocfs2_inode_unlock(inode, 1);
                brelse(bh);
                if (inode) {
                        OCFS2_I(inode)->ip_open_count--;
                        iput(inode);
                }
        }

        return status;
}
static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
        le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
        return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}

static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed)
{
        int status;
        unsigned int flags;
        struct ocfs2_journal *journal = osb->journal;
        struct buffer_head *bh = journal->j_bh;
        struct ocfs2_dinode *fe;

        fe = (struct ocfs2_dinode *)bh->b_data;

        /* The journal bh on the osb always comes from ocfs2_journal_init()
         * and was validated there inside ocfs2_inode_lock_full().  It's a
         * code bug if we mess it up. */
        BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        if (dirty)
                flags |= OCFS2_JOURNAL_DIRTY_FL;
        else
                flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        if (replayed)
                ocfs2_bump_recovery_generation(fe);

        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
        status = ocfs2_write_block(osb, bh, journal->j_inode);
        if (status < 0)
                mlog_errno(status);

        return status;
}
/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = NULL;
        int status = 0;
        struct inode *inode = NULL;
        int num_running_trans = 0;

        BUG_ON(!osb);

        journal = osb->journal;
        if (!journal)
                goto done;

        inode = journal->j_inode;

        if (journal->j_state != OCFS2_JOURNAL_LOADED)
                goto done;

        /* need to inc inode use count - jbd2_journal_destroy will iput. */
        if (!igrab(inode))
                BUG();

        num_running_trans = atomic_read(&(osb->journal->j_num_trans));
        if (num_running_trans > 0)
                mlog(0, "Shutting down journal: must wait on %d "
                     "running transactions!\n",
                     num_running_trans);

        /* Do a commit_cache here. It will flush our journal, *and*
         * release any locks that are still held.
         * set the SHUTDOWN flag and release the trans lock.
         * the commit thread will take the trans lock for us below. */
        journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

        /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
         * drop the trans_lock (which we want to hold until we
         * completely destroy the journal). */
        if (osb->commit_task) {
                /* Wait for the commit thread */
                mlog(0, "Waiting for ocfs2commit to exit....\n");
                kthread_stop(osb->commit_task);
                osb->commit_task = NULL;
        }

        BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

        if (ocfs2_mount_local(osb)) {
                jbd2_journal_lock_updates(journal->j_journal);
                status = jbd2_journal_flush(journal->j_journal);
                jbd2_journal_unlock_updates(journal->j_journal);
                if (status < 0)
                        mlog_errno(status);
        }

        if (status == 0) {
                /*
                 * Do not toggle if flush was unsuccessful otherwise
                 * will leave dirty metadata in a "clean" journal
                 */
                status = ocfs2_journal_toggle_dirty(osb, 0, 0);
                if (status < 0)
                        mlog_errno(status);
        }

        /* Shutdown the kernel journal system */
        jbd2_journal_destroy(journal->j_journal);
        journal->j_journal = NULL;

        OCFS2_I(inode)->ip_open_count--;

        /* unlock our journal */
        ocfs2_inode_unlock(inode, 1);

        brelse(journal->j_bh);
        journal->j_bh = NULL;

        journal->j_state = OCFS2_JOURNAL_FREE;

//      up_write(&journal->j_trans_barrier);
done:
        if (inode)
                iput(inode);
}
static void ocfs2_clear_journal_error(struct super_block *sb,
                                      journal_t *journal,
                                      int slot)
{
        int olderr;

        olderr = jbd2_journal_errno(journal);
        if (olderr) {
                mlog(ML_ERROR, "File system error %d recorded in "
                     "journal %u.\n", olderr, slot);
                mlog(ML_ERROR, "File system on device %s needs checking.\n",
                     sb->s_id);

                jbd2_journal_ack_err(journal);
                jbd2_journal_clear_err(journal);
        }
}

int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
        int status = 0;
        struct ocfs2_super *osb;

        BUG_ON(!journal);

        osb = journal->j_osb;

        status = jbd2_journal_load(journal->j_journal);
        if (status < 0) {
                mlog(ML_ERROR, "Failed to load journal!\n");
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* Launch the commit thread */
        if (!local) {
                osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
                                               "ocfs2cmt");
                if (IS_ERR(osb->commit_task)) {
                        status = PTR_ERR(osb->commit_task);
                        osb->commit_task = NULL;
                        mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
                             "error=%d", status);
                        goto done;
                }
        } else
                osb->commit_task = NULL;

done:
        return status;
}
/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
        int status;

        BUG_ON(!journal);

        status = jbd2_journal_wipe(journal->j_journal, full);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
        if (status < 0)
                mlog_errno(status);

bail:
        return status;
}

static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
        int empty;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        empty = (rm->rm_used == 0);
        spin_unlock(&osb->osb_lock);

        return empty;
}

void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
        wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}

/*
 * JBD might read a cached version of another node's journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up-to-date version of those blocks is to force-read
 * them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date. We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
        int status = 0;
        int i;
        u64 v_blkno, p_blkno, p_blocks, num_blocks;
#define CONCURRENT_JOURNAL_FILL 32ULL
        struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];

        memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);

        num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
        v_blkno = 0;
        while (v_blkno < num_blocks) {
                status = ocfs2_extent_map_get_blocks(inode, v_blkno,
                                                     &p_blkno, &p_blocks, NULL);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                if (p_blocks > CONCURRENT_JOURNAL_FILL)
                        p_blocks = CONCURRENT_JOURNAL_FILL;

                /* We are reading journal data which should not
                 * be put in the uptodate cache */
                status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
                                                p_blkno, p_blocks, bhs);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                for (i = 0; i < p_blocks; i++) {
                        brelse(bhs[i]);
                        bhs[i] = NULL;
                }

                v_blkno += p_blocks;
        }

bail:
        for (i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
                brelse(bhs[i]);
        return status;
}
struct ocfs2_la_recovery_item {
        struct list_head        lri_list;
        int                     lri_slot;
        struct ocfs2_dinode     *lri_la_dinode;
        struct ocfs2_dinode     *lri_tl_dinode;
        struct ocfs2_quota_recovery *lri_qrec;
};

/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process.
 */
void ocfs2_complete_recovery(struct work_struct *work)
{
        int ret;
        struct ocfs2_journal *journal =
                container_of(work, struct ocfs2_journal, j_recovery_work);
        struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item, *n;
        struct ocfs2_quota_recovery *qrec;
        LIST_HEAD(tmp_la_list);

        mlog(0, "completing recovery from keventd\n");

        spin_lock(&journal->j_lock);
        list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
        spin_unlock(&journal->j_lock);

        list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
                list_del_init(&item->lri_list);

                mlog(0, "Complete recovery for slot %d\n", item->lri_slot);

                ocfs2_wait_on_quotas(osb);

                la_dinode = item->lri_la_dinode;
                if (la_dinode) {
                        mlog(0, "Clean up local alloc %llu\n",
                             (unsigned long long)le64_to_cpu(la_dinode->i_blkno));

                        ret = ocfs2_complete_local_alloc_recovery(osb,
                                                                  la_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(la_dinode);
                }

                tl_dinode = item->lri_tl_dinode;
                if (tl_dinode) {
                        mlog(0, "Clean up truncate log %llu\n",
                             (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));

                        ret = ocfs2_complete_truncate_log_recovery(osb,
                                                                   tl_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(tl_dinode);
                }

                ret = ocfs2_recover_orphans(osb, item->lri_slot);
                if (ret < 0)
                        mlog_errno(ret);

                qrec = item->lri_qrec;
                if (qrec) {
                        mlog(0, "Recovering quota files");
                        ret = ocfs2_finish_quota_recovery(osb, qrec,
                                                          item->lri_slot);
                        if (ret < 0)
                                mlog_errno(ret);
                        /* Recovery info is already freed now */
                }

                kfree(item);
        }

        mlog(0, "Recovery completion\n");
}
/* NOTE: This function always eats your references to la_dinode and
 * tl_dinode, either manually on error, or by passing them to
 * ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec)
{
        struct ocfs2_la_recovery_item *item;

        item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
        if (!item) {
                /* Though we wish to avoid it, we are in fact safe in
                 * skipping local alloc cleanup as fsck.ocfs2 is more
                 * than capable of reclaiming unused space. */
                if (la_dinode)
                        kfree(la_dinode);

                if (tl_dinode)
                        kfree(tl_dinode);

                if (qrec)
                        ocfs2_free_quota_recovery(qrec);

                mlog_errno(-ENOMEM);
                return;
        }

        INIT_LIST_HEAD(&item->lri_list);
        item->lri_la_dinode = la_dinode;
        item->lri_slot = slot_num;
        item->lri_tl_dinode = tl_dinode;
        item->lri_qrec = qrec;

        spin_lock(&journal->j_lock);
        list_add_tail(&item->lri_list, &journal->j_la_cleanups);
        queue_work(ocfs2_wq, &journal->j_recovery_work);
        spin_unlock(&journal->j_lock);
}
/* Called by the mount code to queue the last part of recovery for its
 * own slot. */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = osb->journal;

        /* No need to queue up our truncate_log as regular
         * cleanup will catch that. */
        ocfs2_queue_recovery_completion(journal,
                                        osb->slot_num,
                                        osb->local_alloc_copy,
                                        NULL,
                                        NULL);
        ocfs2_schedule_truncate_log_flush(osb, 0);

        osb->local_alloc_copy = NULL;
        osb->dirty = 0;
}

void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
        if (osb->quota_rec) {
                ocfs2_queue_recovery_completion(osb->journal,
                                                osb->slot_num,
                                                NULL,
                                                NULL,
                                                osb->quota_rec);
                osb->quota_rec = NULL;
        }
}
static int __ocfs2_recovery_thread(void *arg)
{
        int status, node_num, slot_num;
        struct ocfs2_super *osb = arg;
        struct ocfs2_recovery_map *rm = osb->recovery_map;
        int *rm_quota = NULL;
        int rm_quota_used = 0, i;
        struct ocfs2_quota_recovery *qrec;

        status = ocfs2_wait_on_mount(osb);
        if (status < 0)
                goto bail;

        rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
        if (!rm_quota) {
                status = -ENOMEM;
                goto bail;
        }
restart:
        status = ocfs2_super_lock(osb, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        spin_lock(&osb->osb_lock);
        while (rm->rm_used) {
                /* It's always safe to remove entry zero, as we won't
                 * clear it until ocfs2_recover_node() has succeeded. */
                node_num = rm->rm_entries[0];
                spin_unlock(&osb->osb_lock);
                mlog(0, "checking node %d\n", node_num);
                slot_num = ocfs2_node_num_to_slot(osb, node_num);
                if (slot_num == -ENOENT) {
                        status = 0;
                        mlog(0, "no slot for this node, so no recovery "
                             "required.\n");
                        goto skip_recovery;
                }
                mlog(0, "node %d was using slot %d\n", node_num, slot_num);

                /* It is a bit subtle with quota recovery. We cannot do it
                 * immediately because we have to obtain cluster locks from
                 * quota files and we also don't want to just skip it because
                 * then quota usage would be out of sync until some node takes
                 * the slot. So we remember which nodes need quota recovery
                 * and when everything else is done, we recover quotas. */
                for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
                if (i == rm_quota_used)
                        rm_quota[rm_quota_used++] = slot_num;

                status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
                if (!status) {
                        ocfs2_recovery_map_clear(osb, node_num);
                } else {
                        mlog(ML_ERROR,
                             "Error %d recovering node %d on device (%u,%u)!\n",
                             status, node_num,
                             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
                        mlog(ML_ERROR, "Volume requires unmount.\n");
                }

                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
        mlog(0, "All nodes recovered\n");

        /* Refresh all journal recovery generations from disk */
        status = ocfs2_check_journals_nolocks(osb);
        status = (status == -EROFS) ? 0 : status;
        if (status < 0)
                mlog_errno(status);

        /* Now it is the right time to recover quotas... We have to do this
         * under the superblock lock so that no one can start using the slot
         * (and crash) before we recover it */
        for (i = 0; i < rm_quota_used; i++) {
                qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
                if (IS_ERR(qrec)) {
                        status = PTR_ERR(qrec);
                        mlog_errno(status);
                        continue;
                }
                ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
                                                NULL, NULL, qrec);
        }

        ocfs2_super_unlock(osb, 1);

        /* We always run recovery on our own orphan dir - the dead
         * node(s) may have disallowed a previous inode delete. Re-processing
         * is therefore required. */
        ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
                                        NULL, NULL);

bail:
        mutex_lock(&osb->recovery_lock);
        if (!status && !ocfs2_recovery_completed(osb)) {
                mutex_unlock(&osb->recovery_lock);
                goto restart;
        }

        osb->recovery_thread_task = NULL;
        mb(); /* sync with ocfs2_recovery_thread_running */
        wake_up(&osb->recovery_event);

        mutex_unlock(&osb->recovery_lock);

        if (rm_quota)
                kfree(rm_quota);

        /* no one is calling kthread_stop() for us so the kthread() api
         * requires that we call do_exit().  And it isn't exported, but
         * complete_and_exit() seems to be a minimal wrapper around it. */
        complete_and_exit(NULL, status);
        return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
        mlog_entry("(node_num=%d, osb->node_num = %d)\n",
                   node_num, osb->node_num);

        mutex_lock(&osb->recovery_lock);
        if (osb->disable_recovery)
                goto out;

        /* People waiting on recovery will wait on
         * the recovery map to empty. */
        if (ocfs2_recovery_map_set(osb, node_num))
                mlog(0, "node %d already in recovery map.\n", node_num);

        mlog(0, "starting recovery thread...\n");

        if (osb->recovery_thread_task)
                goto out;

        osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
                                                "ocfs2rec");
        if (IS_ERR(osb->recovery_thread_task)) {
                mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
                osb->recovery_thread_task = NULL;
        }

out:
        mutex_unlock(&osb->recovery_lock);
        wake_up(&osb->recovery_event);
}
static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
                                    int slot_num,
                                    struct buffer_head **bh,
                                    struct inode **ret_inode)
{
        int status = -EACCES;
        struct inode *inode = NULL;

        BUG_ON(slot_num >= osb->max_slots);

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (!inode || is_bad_inode(inode)) {
                mlog_errno(status);
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = 0;

bail:
        if (inode) {
                if (status || !ret_inode)
                        iput(inode);
                else
                        *ret_inode = inode;
        }
        return status;
}
/* Does the actual journal replay and marks the journal inode as
 * clean.  Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
                                int node_num,
                                int slot_num)
{
        int status;
        int got_lock = 0;
        unsigned int flags;
        struct inode *inode = NULL;
        struct ocfs2_dinode *fe;
        journal_t *journal = NULL;
        struct buffer_head *bh = NULL;
        u32 slot_reco_gen;

        status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
        if (status) {
                mlog_errno(status);
                goto done;
        }

        fe = (struct ocfs2_dinode *)bh->b_data;
        slot_reco_gen = ocfs2_get_recovery_generation(fe);
        brelse(bh);
        bh = NULL;

        /*
         * As the fs recovery is asynchronous, there is a small chance that
         * another node mounted (and recovered) the slot before the recovery
         * thread could get the lock. To handle that, we dirty read the journal
         * inode for that slot to get the recovery generation. If it is
         * different than what we expected, the slot has been recovered.
         * If not, it needs recovery.
         */
        if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
                mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
                     osb->slot_recovery_generations[slot_num], slot_reco_gen);
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                status = -EBUSY;
                goto done;
        }

        /* Continue with recovery as the journal has not yet been recovered */

        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not lock journal!\n");
                goto done;
        }
        got_lock = 1;

        fe = (struct ocfs2_dinode *)bh->b_data;

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        slot_reco_gen = ocfs2_get_recovery_generation(fe);

        if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
                mlog(0, "No recovery required for node %d\n", node_num);
                /* Refresh recovery generation for the slot */
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                goto done;
        }

        mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
             node_num, slot_num,
             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

        OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

        status = ocfs2_force_read_journal(inode);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        mlog(0, "calling journal_init_inode\n");
        journal = jbd2_journal_init_inode(inode);
        if (journal == NULL) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = -EIO;
                goto done;
        }

        status = jbd2_journal_load(journal);
        if (status < 0) {
                mlog_errno(status);
                if (!igrab(inode))
                        BUG();
                jbd2_journal_destroy(journal);
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal, slot_num);

        /* wipe the journal */
        mlog(0, "flushing the journal.\n");
        jbd2_journal_lock_updates(journal);
        status = jbd2_journal_flush(journal);
        jbd2_journal_unlock_updates(journal);
        if (status < 0)
                mlog_errno(status);

        /* This will mark the node clean */
        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        /* Increment recovery generation to indicate successful recovery */
        ocfs2_bump_recovery_generation(fe);
        osb->slot_recovery_generations[slot_num] =
                ocfs2_get_recovery_generation(fe);

        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
        status = ocfs2_write_block(osb, bh, inode);
        if (status < 0)
                mlog_errno(status);

        if (!igrab(inode))
                BUG();

        jbd2_journal_destroy(journal);

done:
        /* drop the lock on this node's journal */
        if (got_lock)
                ocfs2_inode_unlock(inode, 1);

        if (inode)
                iput(inode);

        brelse(bh);

        return status;
}
/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num)
{
        int status = 0;
        struct ocfs2_dinode *la_copy = NULL;
        struct ocfs2_dinode *tl_copy = NULL;

        mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
                   node_num, slot_num, osb->node_num);

        /* Should not ever be called to recover ourselves -- in that
         * case we should've called ocfs2_journal_load instead. */
        BUG_ON(osb->node_num == node_num);

        status = ocfs2_replay_journal(osb, node_num, slot_num);
        if (status < 0) {
                if (status == -EBUSY) {
                        mlog(0, "Skipping recovery for slot %u (node %u) "
                             "as another node has recovered it\n", slot_num,
                             node_num);
                        status = 0;
                        goto done;
                }
                mlog_errno(status);
                goto done;
        }

        /* Stamp a clean local alloc file AFTER recovering the journal... */
        status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* An error from begin_truncate_log_recovery is not
         * serious enough to warrant halting the rest of
         * recovery. */
        status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
        if (status < 0)
                mlog_errno(status);

        /* Likewise, this would be a strange but ultimately not so
         * harmful place to get an error... */
        status = ocfs2_clear_slot(osb, slot_num);
        if (status < 0)
                mlog_errno(status);

        /* This will kfree the memory pointed to by la_copy and tl_copy */
        ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
                                        tl_copy, NULL);

        status = 0;
done:
        return status;
}
/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num)
{
        int status, flags;
        struct inode *inode = NULL;

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (inode == NULL) {
                mlog(ML_ERROR, "access error\n");
                status = -EACCES;
                goto bail;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
        status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
        if (status < 0) {
                if (status != -EAGAIN)
                        mlog_errno(status);
                goto bail;
        }

        ocfs2_inode_unlock(inode, 1);
bail:
        if (inode)
                iput(inode);

        return status;
}
/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
        unsigned int node_num;
        int status, i;
        u32 gen;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *di;

        /* This is called with the super block cluster lock, so we
         * know that the slot map can't change underneath us. */

        for (i = 0; i < osb->max_slots; i++) {
                /* Read journal inode to get the recovery generation */
                status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
                if (status) {
                        mlog_errno(status);
                        goto bail;
                }
                di = (struct ocfs2_dinode *)bh->b_data;
                gen = ocfs2_get_recovery_generation(di);
                brelse(bh);
                bh = NULL;

                spin_lock(&osb->osb_lock);
                osb->slot_recovery_generations[i] = gen;

                mlog(0, "Slot %u recovery generation is %u\n", i,
                     osb->slot_recovery_generations[i]);

                if (i == osb->slot_num) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
                if (status == -ENOENT) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                if (__ocfs2_recovery_map_test(osb, node_num)) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }
                spin_unlock(&osb->osb_lock);

                /* Ok, we have a slot occupied by another node which
                 * is not in the recovery map. We trylock its journal
                 * file here to test if it's alive. */
                status = ocfs2_trylock_journal(osb, i);
                if (!status) {
                        /* Since we're called from mount, we know that
                         * the recovery thread can't race us on
                         * setting / checking the recovery bits. */
                        ocfs2_recovery_thread(osb, node_num);
                } else if ((status < 0) && (status != -EAGAIN)) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}
struct ocfs2_orphan_filldir_priv {
        struct inode            *head;
        struct ocfs2_super      *osb;
};

static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
                                loff_t pos, u64 ino, unsigned type)
{
        struct ocfs2_orphan_filldir_priv *p = priv;
        struct inode *iter;

        if (name_len == 1 && !strncmp(".", name, 1))
                return 0;
        if (name_len == 2 && !strncmp("..", name, 2))
                return 0;

        /* Skip bad inodes so that recovery can continue */
        iter = ocfs2_iget(p->osb, ino,
                          OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
        if (IS_ERR(iter))
                return 0;

        mlog(0, "queue orphan %llu\n",
             (unsigned long long)OCFS2_I(iter)->ip_blkno);
        /* No locking is required for the next_orphan queue as there
         * is only ever a single process doing orphan recovery. */
        OCFS2_I(iter)->ip_next_orphan = p->head;
        p->head = iter;

        return 0;
}

static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                               int slot,
                               struct inode **head)
{
        int status;
        struct inode *orphan_dir_inode = NULL;
        struct ocfs2_orphan_filldir_priv priv;
        loff_t pos = 0;

        priv.osb = osb;
        priv.head = *head;

        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       slot);
        if (!orphan_dir_inode) {
                status = -ENOENT;
                mlog_errno(status);
                return status;
        }

        mutex_lock(&orphan_dir_inode->i_mutex);
        status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
                                   ocfs2_orphan_filldir);
        if (status) {
                mlog_errno(status);
                goto out_cluster;
        }

        *head = priv.head;

out_cluster:
        ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
        mutex_unlock(&orphan_dir_inode->i_mutex);
        iput(orphan_dir_inode);
        return status;
}
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
                                              int slot)
{
        int ret;

        spin_lock(&osb->osb_lock);
        ret = !osb->osb_orphan_wipes[slot];
        spin_unlock(&osb->osb_lock);
        return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
                                             int slot)
{
        spin_lock(&osb->osb_lock);
        /* Mark ourselves such that new processes in delete_inode()
         * know to quit early. */
        ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
        while (osb->osb_orphan_wipes[slot]) {
                /* If any processes are already in the middle of an
                 * orphan wipe on this dir, then we need to wait for
                 * them. */
                spin_unlock(&osb->osb_lock);
                wait_event_interruptible(osb->osb_wipe_event,
                                         ocfs2_orphan_recovery_can_continue(osb, slot));
                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
                                              int slot)
{
        ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}
/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most) of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot)
{
        int ret = 0;
        struct inode *inode = NULL;
        struct inode *iter;
        struct ocfs2_inode_info *oi;

        mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);

        ocfs2_mark_recovering_orphan_dir(osb, slot);
        ret = ocfs2_queue_orphans(osb, slot, &inode);
        ocfs2_clear_recovering_orphan_dir(osb, slot);

        /* Error here should be noted, but we want to continue with as
         * many queued inodes as we've got. */
        if (ret)
                mlog_errno(ret);

        while (inode) {
                oi = OCFS2_I(inode);
                mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);

                iter = oi->ip_next_orphan;

                spin_lock(&oi->ip_lock);
                /* The remote delete code may have set these on the
                 * assumption that the other node would wipe them
                 * successfully.  If they are still in the node's
                 * orphan dir, we need to reset that state. */
                oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);

                /* Set the proper information to get us going into
                 * ocfs2_delete_inode. */
                oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
                spin_unlock(&oi->ip_lock);

                iput(inode);

                inode = iter;
        }

        return ret;
}
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
        /* This check is good because ocfs2 will wait on our recovery
         * thread before changing it to something other than MOUNTED
         * or DISABLED. */
        wait_event(osb->osb_mount_event,
                   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
                   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
                   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

        /* If there's an error on mount, then we may never get to the
         * MOUNTED flag, but this is set right before
         * dismount_volume() so we can trust it. */
        if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
                mlog(0, "mount error, exiting!\n");
                return -EBUSY;
        }

        return 0;
}
static int ocfs2_commit_thread(void *arg)
{
        int status;
        struct ocfs2_super *osb = arg;
        struct ocfs2_journal *journal = osb->journal;

        /* we can trust j_num_trans here because _should_stop() is only set in
         * shutdown and nobody other than ourselves should be able to start
         * transactions.  committing on shutdown might take a few iterations
         * as final transactions put deleted inodes on the list */
        while (!(kthread_should_stop() &&
                 atomic_read(&journal->j_num_trans) == 0)) {

                wait_event_interruptible(osb->checkpoint_event,
                                         atomic_read(&journal->j_num_trans)
                                         || kthread_should_stop());

                status = ocfs2_commit_cache(osb);
                if (status < 0)
                        mlog_errno(status);

                if (kthread_should_stop() && atomic_read(&journal->j_num_trans)) {
                        mlog(ML_KTHREAD,
                             "commit_thread: %u transactions pending on "
                             "shutdown\n",
                             atomic_read(&journal->j_num_trans));
                }
        }

        return 0;
}
/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery.  Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
        int ret = 0;
        int slot;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
        int journal_dirty = 0;

        for (slot = 0; slot < osb->max_slots; slot++) {
                ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                di = (struct ocfs2_dinode *)di_bh->b_data;

                osb->slot_recovery_generations[slot] =
                        ocfs2_get_recovery_generation(di);

                if (le32_to_cpu(di->id1.journal1.ij_flags) &
                    OCFS2_JOURNAL_DIRTY_FL)
                        journal_dirty = 1;

                brelse(di_bh);
                di_bh = NULL;
        }

out:
        if (journal_dirty)
                ret = -EROFS;
        return ret;
}