/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/* Upper limit of the number of segments
				   appended in collection retry loop */
/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file. This also creates segments without
			   a checkpoint */
};
/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};
/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};
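/*
 * Three instances of this vtable are defined below: nilfs_sc_file_ops for
 * regular and metadata files, nilfs_sc_dat_ops for the DAT file, and
 * nilfs_sc_dsync_ops for data-only (dsync) log construction.
 */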
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)   nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)   nilfs_cnt32_ge(b, a)
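/*
 * The nilfs_cnt32_*() macros compare 32-bit sequence counters in a
 * wraparound-safe way (the same idiom as time_after()/time_before()); they
 * are used to order the request/done sequence numbers of the segment
 * constructor.
 */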
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	if (cur_ti->ti_magic == NILFS_TI_MAGIC)
		return ++cur_ti->ti_count;

	/*
	 * If the journal_info field is occupied by another FS,
	 * it is saved and will be restored on
	 * nilfs_transaction_commit().
	 */
	       "NILFS warning: journal info from a different "
	save = current->journal_info;

	ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
	ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;

	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;

	vfs_check_frozen(sb, SB_FREEZE_WRITE);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);

	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {

	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {

	up_read(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
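/*
 * Typical caller pattern (illustrative sketch only; the real callers live in
 * the NILFS inode/file code paths, and update_something() below is a
 * hypothetical stand-in for the protected update):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = update_something(inode);
 *	if (!err)
 *		err = nilfs_transaction_commit(sb);
 *	else
 *		nilfs_transaction_abort(sb);
 *	return err;
 */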
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	down_write(&nilfs->ns_segctor_sem);
	if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))

	nilfs_segctor_do_immediate_flush(sci);

	up_write(&nilfs->ns_segctor_sem);

	ti->ti_flags |= NILFS_TI_GC;

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;

	if (nilfs_doing_gc())

	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up */

	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);

		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);

	segbuf->sb_sum.flags |= NILFS_SS_SR;
/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_blk_cnt == 0)
		return;

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))

	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
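/*
 * Note on the summary layout built above: each file that contributes blocks
 * to a log gets one finfo entry in the segment summary, followed by one
 * binfo entry per collected block; sc_finfo_ptr and sc_binfo_ptr track the
 * current write positions for those entries.
 */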
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);

	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);

	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};
static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
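/*
 * The dsync ops collect data blocks only: node and bmap collection are
 * skipped (NULL) because data-sync logs never carry b-tree node blocks.
 */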
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);

	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))

		if (!page_has_buffers(page))
			create_empty_buffers(page, 1 << inode->i_blkbits, 0);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))

			list_add_tail(&bh->b_assoc_buffers, listp);

			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);

		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);

			if (unlikely(ii->i_bh)) {

			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
			}

			if (nv == SC_N_INODEVEC)

		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))

	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))

	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))

	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))

	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	/*
	 * The following code is duplicated with cpfile.  But, it is
	 * needed to collect the checkpoint even if it was not newly
	 * created.
	 */
	mark_buffer_dirty(bh_cp);
	nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
	nilfs_cpfile_put_checkpoint(
		nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);

	WARN_ON(err == -EINVAL || err == -ENOENT);
	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);

	WARN_ON(err == -EINVAL || err == -ENOENT);

	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;
}
static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		err = collect(sci, bh, inode);
		if (unlikely(err))
			goto dispose_buffers;
	}
	return err;

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);

		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers,
			sc_ops->collect_data);
		BUG_ON(!err); /* always receive -E2BIG or true error */
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);

		/* dispose node list */
		nilfs_segctor_apply_buffers(
			sci, inode, &node_buffers, NULL);

		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);

	nilfs_segctor_end_finfo(sci, inode);

	/* always receive -E2BIG or true error if n > rest */
	return err;
}
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;

			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;

		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);

				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);

			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;

		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);

		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);

		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);

		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);

		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);

		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);

		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;

		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);

		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;

	case NILFS_ST_DSYNC:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);

		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */

	} else {
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);

	err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);

	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);

	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
	}
}
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int nadd = 1;
	int err;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;

		if (unlikely(err != -E2BIG))

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)

		nilfs_clear_logs(&sci->sc_segbufs);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
		}
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;
}
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;

		finfo = nilfs_segctor_map_segsum_entry(
			sci, &ssp, sizeof(*finfo));
		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);

		inode = bh->b_page->mapping->host;

		if (mode == SC_LSEG_DSYNC)
			sc_op = &nilfs_sc_dsync_ops;
		else if (ino == NILFS_DAT_INO)
			sc_op = &nilfs_sc_dat_ops;
		else /* file blocks */
			sc_op = &nilfs_sc_file_ops;

		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);

		nilfs_list_replace_buffer(bh_org, bh);

		sc_op->write_data_binfo(sci, &ssp, &binfo);

		sc_op->write_node_binfo(sci, &ssp, &binfo);

		if (--nblocks == 0) {

		} else if (ndatablk > 0)
			ndatablk--;
	}
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);

		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return err;
}
static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	clear_page_dirty_for_io(page);
	set_page_writeback(page);
}

static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh->b_page != bd_page) {

				clear_page_dirty_for_io(bd_page);
				set_page_writeback(bd_page);
				unlock_page(bd_page);

				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {

					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}

			}
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}

	clear_page_dirty_for_io(bd_page);
	set_page_writeback(bd_page);
	unlock_page(bd_page);

	nilfs_begin_page_io(fs_page);
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);

		}
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);

	}

	end_page_writeback(page);
}
static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh->b_page != bd_page) {

				end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}

			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}

	end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_async_write(bh);
			if (bh->b_page != bd_page) {

				end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages are
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_async_write(bh);
			clear_buffer_delay(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_redirected(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}

			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);

	nilfs_segctor_complete_write(sci);
	nilfs_destroy_logs(&sci->sc_write_logs);
	return ret;
}
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;
	int err = 0;

	spin_lock(&nilfs->ns_inode_lock);

	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		struct buffer_head *ibh;

		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(
			ifile, ii->vfs_inode.i_ino, &ibh);
		if (unlikely(err)) {
			nilfs_warning(sci->sc_super, __func__,
				      "failed to get inode block.\n");

		}
		mark_buffer_dirty(ibh);
		nilfs_mdt_mark_dirty(ifile);
		spin_lock(&nilfs->ns_inode_lock);
		if (likely(!ii->i_bh))

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return err;
}

static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);

		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&nilfs->ns_inode_lock);
}
/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);

		}
		err = nilfs_segctor_assign(sci, mode);

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

	nilfs_segctor_drop_written_files(sci, nilfs);
	return 0;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	return err;
}
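/*
 * In short, one pass of nilfs_segctor_do_construct() above runs the
 * pipeline: collect dirty blocks, assign block addresses, prepare the pages
 * for writeback, checksum and write the logs, then wait for the I/O and
 * complete (or abort) the construction.
 */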
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;

	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}
struct nilfs_segctor_wait_request {
	wait_queue_t	wq;
	__u32		seq;
	atomic_t	done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);

	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	if (atomic_read(&wait_req.done)) {

	}
	if (!signal_pending(current)) {

	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
}

static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {

			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,

		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}
/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sb);
	return err;
}
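/*
 * Typical use (sketch): an fdatasync-style caller invokes
 * nilfs_construct_dsync_segment(inode->i_sb, inode, start, end) to flush a
 * single file's data, while a full sync uses nilfs_construct_segment()
 * instead.
 */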
#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */
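/*
 * sc_flush_request is a bitmap of pending flush requests: bit 0 stands for
 * ordinary data files and bit NILFS_DAT_INO for the DAT metadata file, as
 * set by nilfs_flush_segment() above.
 */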
/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))

	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (mode != SC_FLUSH_DAT)
		atomic_set(&nilfs->ns_ndirtyblks, 0);
	if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
	    nilfs_discontinued(nilfs)) {
		down_write(&nilfs->ns_sem);

		sbp = nilfs_prepare_super(sci->sc_super,
					  nilfs_sb_will_flip(nilfs));

		nilfs_set_log_cursor(sbp[0], nilfs);
		err = nilfs_commit_super(sci->sc_super,

		up_write(&nilfs->ns_sem);
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}
static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
		iput(&ii->vfs_inode);
	}
}
2325 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2328 struct the_nilfs *nilfs = sb->s_fs_info;
2329 struct nilfs_sc_info *sci = nilfs->ns_writer;
2330 struct nilfs_transaction_info ti;
2336 nilfs_transaction_lock(sb, &ti, 1);
2338 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2342 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2343 if (unlikely(err)) {
2344 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2348 sci->sc_freesegs = kbufs[4];
2349 sci->sc_nfreesegs = argv[4].v_nmembs;
2350 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2353 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2354 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2356 if (likely(!err))
2357 break;
2359 nilfs_warning(sb, __func__,
2360 "segment construction failed. (err=%d)", err);
2361 set_current_state(TASK_INTERRUPTIBLE);
2362 schedule_timeout(sci->sc_interval);
2364 if (nilfs_test_opt(nilfs, DISCARD)) {
2365 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2366 sci->sc_nfreesegs);
2367 if (ret) {
2368 printk(KERN_WARNING
2369 "NILFS warning: error %d on discard request, "
2370 "turning discards off for the device\n", ret);
2371 nilfs_clear_opt(nilfs, DISCARD);
2376 sci->sc_freesegs = NULL;
2377 sci->sc_nfreesegs = 0;
2378 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2379 nilfs_transaction_unlock(sb);
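/*
 * nilfs_segctor_thread_construct - form logs from the segctord context
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Takes the transaction lock on behalf of the constructor thread, runs
 * nilfs_segctor_construct(), and re-arms sc_timer when the written log did
 * not close the current logical segment.
 */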
2383 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2385 struct nilfs_transaction_info ti;
2387 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2388 nilfs_segctor_construct(sci, mode);
2391 * An unclosed segment should be retried; we do this using sc_timer.
2392 * A timeout of sc_timer invokes complete construction, which
2393 * closes the current logical segment.
2395 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2396 nilfs_segctor_start_timer(sci);
2398 nilfs_transaction_unlock(sci->sc_super);
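/*
 * nilfs_segctor_do_immediate_flush - service a prioritized flush request
 * @sci: segment constructor object
 *
 * Chooses SC_FLUSH_DAT or SC_FLUSH_FILE from sc_flush_request, performs the
 * construction, clears the serviced request bit, and finally drops the
 * NILFS_SC_PRIOR_FLUSH flag.
 */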
2401 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2406 spin_lock(&sci->sc_state_lock);
2407 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2408 SC_FLUSH_DAT : SC_FLUSH_FILE;
2409 spin_unlock(&sci->sc_state_lock);
2412 err = nilfs_segctor_do_construct(sci, mode);
2414 spin_lock(&sci->sc_state_lock);
2415 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2416 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2417 spin_unlock(&sci->sc_state_lock);
2419 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
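/*
 * nilfs_segctor_flush_mode - choose a construction mode for pending flushes
 * @sci: segment constructor object
 *
 * While no logical segment is left unclosed, or the major checkpoint
 * frequency has not yet elapsed since the segment was started, a purely
 * data-file request maps to SC_FLUSH_FILE and a purely DAT request maps to
 * SC_FLUSH_DAT; otherwise a full construction with a super root is used.
 */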
2422 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2424 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2425 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2426 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2427 return SC_FLUSH_FILE;
2428 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2429 return SC_FLUSH_DAT;
2435 * nilfs_segctor_thread - main loop of the segment constructor thread.
2436 * @arg: pointer to a struct nilfs_sc_info.
2438 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2439 * to execute segment constructions.
2441 static int nilfs_segctor_thread(void *arg)
2443 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2444 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2447 sci->sc_timer.data = (unsigned long)current;
2448 sci->sc_timer.function = nilfs_construction_timeout;
2451 sci->sc_task = current;
2452 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2453 printk(KERN_INFO
2454 "segctord starting. Construction interval = %lu seconds, "
2455 "CP frequency < %lu seconds\n",
2456 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2458 spin_lock(&sci->sc_state_lock);
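/*
 * Main loop, entered with sc_state_lock held: pick a construction mode,
 * drop the lock while forming logs, then either sleep on sc_wait_daemon or
 * iterate again when new requests have arrived or the timer has expired.
 */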
2463 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2464 goto end_thread;
2466 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2467 mode = SC_LSEG_SR;
2468 else if (!sci->sc_flush_request)
2469 break;
2470 else
2471 mode = nilfs_segctor_flush_mode(sci);
2473 spin_unlock(&sci->sc_state_lock);
2474 nilfs_segctor_thread_construct(sci, mode);
2475 spin_lock(&sci->sc_state_lock);
2480 if (freezing(current)) {
2481 spin_unlock(&sci->sc_state_lock);
2482 try_to_freeze();
2483 spin_lock(&sci->sc_state_lock);
2485 DEFINE_WAIT(wait);
2486 int should_sleep = 1;
2488 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2489 TASK_INTERRUPTIBLE);
2491 if (sci->sc_seq_request != sci->sc_seq_done)
2492 should_sleep = 0;
2493 else if (sci->sc_flush_request)
2494 should_sleep = 0;
2495 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2496 should_sleep = time_before(jiffies,
2497 sci->sc_timer.expires);
2500 spin_unlock(&sci->sc_state_lock);
2501 schedule();
2502 spin_lock(&sci->sc_state_lock);
2504 finish_wait(&sci->sc_wait_daemon, &wait);
2505 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2506 time_after_eq(jiffies, sci->sc_timer.expires));
2508 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2509 set_nilfs_discontinued(nilfs);
2513 end_thread:
2514 spin_unlock(&sci->sc_state_lock);
2517 sci->sc_task = NULL;
2518 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
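/*
 * nilfs_segctor_start_thread - create "segctord" and wait for its startup
 * @sci: segment constructor object
 */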
2522 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2524 struct task_struct *t;
2526 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2527 if (IS_ERR(t)) {
2528 int err = PTR_ERR(t);
2530 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2531 err);
2534 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
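/*
 * nilfs_segctor_kill_thread - ask segctord to quit and wait for it to exit
 * @sci: segment constructor object
 *
 * Called with sc_state_lock held; the lock is dropped temporarily while
 * waiting for sc_task to become NULL.
 */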
2538 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2539 __acquires(&sci->sc_state_lock)
2540 __releases(&sci->sc_state_lock)
2542 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2544 while (sci->sc_task) {
2545 wake_up(&sci->sc_wait_daemon);
2546 spin_unlock(&sci->sc_state_lock);
2547 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2548 spin_lock(&sci->sc_state_lock);
2553 * Setup & clean-up functions
2555 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2556 struct nilfs_root *root)
2558 struct the_nilfs *nilfs = sb->s_fs_info;
2559 struct nilfs_sc_info *sci;
2561 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2567 nilfs_get_root(root);
2568 sci->sc_root = root;
2570 init_waitqueue_head(&sci->sc_wait_request);
2571 init_waitqueue_head(&sci->sc_wait_daemon);
2572 init_waitqueue_head(&sci->sc_wait_task);
2573 spin_lock_init(&sci->sc_state_lock);
2574 INIT_LIST_HEAD(&sci->sc_dirty_files);
2575 INIT_LIST_HEAD(&sci->sc_segbufs);
2576 INIT_LIST_HEAD(&sci->sc_write_logs);
2577 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2578 init_timer(&sci->sc_timer);
2580 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2581 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2582 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2584 if (nilfs->ns_interval)
2585 sci->sc_interval = HZ * nilfs->ns_interval;
2586 if (nilfs->ns_watermark)
2587 sci->sc_watermark = nilfs->ns_watermark;
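/*
 * nilfs_segctor_write_out - flush out remaining dirty data at shutdown
 * @sci: segment constructor object
 *
 * Retries a full log construction up to NILFS_SC_CLEANUP_RETRY times so that
 * blocks left dirty after segctord has been stopped still reach the disk.
 */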
2591 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2593 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2595 /* The segctord thread was stopped and its timer was removed.
2596 But some tasks remain. */
2597 do {
2598 struct nilfs_transaction_info ti;
2600 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2601 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2602 nilfs_transaction_unlock(sci->sc_super);
2604 } while (ret && retrycount-- > 0);
2608 * nilfs_segctor_destroy - destroy the segment constructor.
2609 * @sci: nilfs_sc_info
2611 * nilfs_segctor_destroy() kills the segctord thread and frees
2612 * the nilfs_sc_info struct.
2613 * Caller must hold the segment semaphore.
2615 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2617 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2620 up_write(&nilfs->ns_segctor_sem);
2622 spin_lock(&sci->sc_state_lock);
2623 nilfs_segctor_kill_thread(sci);
2624 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2625 || sci->sc_seq_request != sci->sc_seq_done);
2626 spin_unlock(&sci->sc_state_lock);
2628 if (flag || !nilfs_segctor_confirm(sci))
2629 nilfs_segctor_write_out(sci);
2631 if (!list_empty(&sci->sc_dirty_files)) {
2632 nilfs_warning(sci->sc_super, __func__,
2633 "dirty file(s) after the final construction\n");
2634 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2637 WARN_ON(!list_empty(&sci->sc_segbufs));
2638 WARN_ON(!list_empty(&sci->sc_write_logs));
2640 nilfs_put_root(sci->sc_root);
2642 down_write(&nilfs->ns_segctor_sem);
2644 del_timer_sync(&sci->sc_timer);
2645 kfree(sci);
2649 * nilfs_attach_log_writer - attach log writer
2650 * @sb: super block instance
2651 * @root: root object of the current filesystem tree
2653 * This allocates a log writer object, initializes it, and starts the
2654 * log writer thread.
2656 * Return Value: On success, 0 is returned. On error, one of the following
2657 * negative error codes is returned.
2659 * %-ENOMEM - Insufficient memory available.
2661 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2663 struct the_nilfs *nilfs = sb->s_fs_info;
2666 if (nilfs->ns_writer) {
2668 * This happens if the filesystem was remounted
2669 * read/write after nilfs_error degenerated it into a
2670 * read-only mount.
2672 nilfs_detach_log_writer(sb);
2675 nilfs->ns_writer = nilfs_segctor_new(sb, root);
2676 if (!nilfs->ns_writer)
2677 return -ENOMEM;
2679 err = nilfs_segctor_start_thread(nilfs->ns_writer);
2680 if (err) {
2681 kfree(nilfs->ns_writer);
2682 nilfs->ns_writer = NULL;
2688 * nilfs_detach_log_writer - destroy log writer
2689 * @sb: super block instance
2691 * This kills the log writer daemon, frees the log writer object, and
2692 * destroys the list of dirty files.
2694 void nilfs_detach_log_writer(struct super_block *sb)
2696 struct the_nilfs *nilfs = sb->s_fs_info;
2697 LIST_HEAD(garbage_list);
2699 down_write(&nilfs->ns_segctor_sem);
2700 if (nilfs->ns_writer) {
2701 nilfs_segctor_destroy(nilfs->ns_writer);
2702 nilfs->ns_writer = NULL;
2705 /* Forcibly free the list of dirty files */
2706 spin_lock(&nilfs->ns_inode_lock);
2707 if (!list_empty(&nilfs->ns_dirty_files)) {
2708 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2709 nilfs_warning(sb, __func__,
2710 "Hit dirty file after stopped log writer\n");
2712 spin_unlock(&nilfs->ns_inode_lock);
2713 up_write(&nilfs->ns_segctor_sem);
2715 nilfs_dispose_list(nilfs, &garbage_list, 1);