4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
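/*
 * Illustrative sketch of the common caller-side pattern around
 * wait_on_buffer(): block until BH_Lock drops, then check whether the I/O
 * actually succeeded.  The function name example_wait_for_bh is an
 * assumption for illustration only.
 */
static int example_wait_for_bh(struct buffer_head *bh)
{
	wait_on_buffer(bh);		/* block until BH_Lock is cleared */
	if (!buffer_uptodate(bh))	/* the I/O against bh failed */
		return -EIO;
	return 0;
}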
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
120 set_buffer_uptodate(bh);
122 /* This happens due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
134 set_buffer_uptodate(bh);
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 printk(KERN_WARNING "lost page write due to "
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
161 EXPORT_SYMBOL(sync_blockdev);
163 static void __fsync_super(struct super_block *sb)
165 sync_inodes_sb(sb, 0);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
182 int fsync_super(struct super_block *sb)
185 return sync_blockdev(sb->s_bdev);
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
193 int fsync_bdev(struct block_device *bdev)
195 struct super_block *sb = get_super(bdev);
197 int res = fsync_super(sb);
201 return sync_blockdev(bdev);
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
213 struct super_block *freeze_bdev(struct block_device *bdev)
215 struct super_block *sb;
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
225 sb->s_frozen = SB_FREEZE_TRANS;
228 sync_blockdev(sb->s_bdev);
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
235 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
237 EXPORT_SYMBOL(freeze_bdev);
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
249 BUG_ON(sb->s_bdev != bdev);
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
255 wake_up(&sb->s_wait_unfrozen);
259 mutex_unlock(&bdev->bd_mount_mutex);
261 EXPORT_SYMBOL(thaw_bdev);
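/*
 * Illustrative sketch of the intended freeze/thaw pairing, as a snapshot
 * implementation might use it.  The function name and the snapshot step are
 * assumptions for illustration only.
 */
static void example_snapshot_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* sync the fs and block new writes */
	/* ... take the device snapshot here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
}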
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
267 static void do_sync(unsigned long wait)
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
282 asmlinkage long sys_sync(void)
288 void emergency_sync(void)
290 pdflush_operation(do_sync, 0);
294 * Generic function to fsync a file.
296 * filp may be NULL if called via the msync of a vma.
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
308 /* sync the superblock to buffers */
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
322 long do_fsync(struct file *file, int datasync)
326 struct address_space *mapping = file->f_mapping;
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
334 current->flags |= PF_SYNCWRITE;
335 ret = filemap_fdatawrite(mapping);
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
349 current->flags &= ~PF_SYNCWRITE;
354 static long __do_fsync(unsigned int fd, int datasync)
361 ret = do_fsync(file, datasync);
367 asmlinkage long sys_fsync(unsigned int fd)
369 return __do_fsync(fd, 0);
372 asmlinkage long sys_fdatasync(unsigned int fd)
374 return __do_fsync(fd, 1);
378 * Various filesystems appear to want __find_get_block to be non-blocking.
379 * But it's the page lock which protects the buffers. To get around this,
380 * we get exclusion from try_to_free_buffers with the blockdev mapping's
383 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
384 * may be quite high. This code could TryLock the page, and if that
385 * succeeds, there is no need to take private_lock. (But if
386 * private_lock is contended then so is mapping->tree_lock).
388 static struct buffer_head *
389 __find_get_block_slow(struct block_device *bdev, sector_t block)
391 struct inode *bd_inode = bdev->bd_inode;
392 struct address_space *bd_mapping = bd_inode->i_mapping;
393 struct buffer_head *ret = NULL;
395 struct buffer_head *bh;
396 struct buffer_head *head;
400 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
401 page = find_get_page(bd_mapping, index);
405 spin_lock(&bd_mapping->private_lock);
406 if (!page_has_buffers(page))
408 head = page_buffers(page);
411 if (bh->b_blocknr == block) {
416 if (!buffer_mapped(bh))
418 bh = bh->b_this_page;
419 } while (bh != head);
421 /* we might be here because some of the buffers on this page are
422 * not mapped. This is due to various races between
423 * file io on the block device and getblk. It gets dealt with
424 * elsewhere, don't buffer_error if we had some unmapped buffers
427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
430 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
431 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
434 spin_unlock(&bd_mapping->private_lock);
435 page_cache_release(page);
440 /* If invalidate_buffers() will trash dirty buffers, it means some kind
441 of fs corruption is going on. Trashing dirty data always implies losing
442 information that was supposed to be just stored on the physical layer
445 Thus invalidate_buffers in general usage is not allowed to trash
446 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
447 be preserved. These buffers are simply skipped.
449 We also skip buffers which are still in use. For example this can
450 happen if a userspace program is reading the block device.
452 NOTE: if the user removes a removable-media disk while there is still
453 dirty data not yet synced to disk (due to a bug in the device driver or
454 to a user error), not destroying those dirty buffers could also corrupt
455 the next media inserted; thus a parameter is necessary to handle this
456 case in the safest way possible (trying not to corrupt the newly
457 inserted disk with data belonging to the old, now corrupted, disk).
458 Also, for the ramdisk, the natural way to release the ramdisk memory is
459 to destroy the dirty buffers.
461 These are two special cases. In normal usage the device driver issues
462 a sync on the device (without waiting for I/O completion) and then an
463 invalidate_buffers call that doesn't trash dirty buffers.
465 For handling cache coherency with the blkdev pagecache the 'update' case
466 has been introduced. It is needed to re-read any pinned buffer from
467 disk. NOTE: re-reading from disk is destructive, so we can do it only
468 when we assume nobody is changing the buffercache under our I/O and when
469 we think the disk contains more recent information than the buffercache.
470 The update == 1 pass marks the buffers we need to update; the update == 2
471 pass does the actual I/O. */
472 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
474 invalidate_bh_lrus();
476 * FIXME: what about destroy_dirty_buffers?
477 * We really want to use invalidate_inode_pages2() for
478 * that, but not until that's cleaned up.
480 invalidate_inode_pages(bdev->bd_inode->i_mapping);
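/*
 * Illustrative sketch of the "normal usage" described in the comment above:
 * start writeback of dirty data first, then invalidate without destroying
 * dirty buffers.  (sync_blockdev() also waits for the I/O, which is stricter
 * than the comment requires.)  The function name is an assumption for
 * illustration only.
 */
static void example_revalidate_bdev(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* push dirty pagecache to the device */
	invalidate_bdev(bdev, 0);	/* 0: do not destroy dirty buffers */
}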
484 * Kick pdflush then try to free up some ZONE_NORMAL memory.
486 static void free_more_memory(void)
491 wakeup_pdflush(1024);
494 for_each_pgdat(pgdat) {
495 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
497 try_to_free_pages(zones, GFP_NOFS);
502 * I/O completion handler for block_read_full_page() - pages
503 * which come unlocked at the end of I/O.
505 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
508 struct buffer_head *first;
509 struct buffer_head *tmp;
511 int page_uptodate = 1;
513 BUG_ON(!buffer_async_read(bh));
517 set_buffer_uptodate(bh);
519 clear_buffer_uptodate(bh);
520 if (printk_ratelimit())
526 * Be _very_ careful from here on. Bad things can happen if
527 * two buffer heads end IO at almost the same time and both
528 * decide that the page is now completely done.
530 first = page_buffers(page);
531 local_irq_save(flags);
532 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
533 clear_buffer_async_read(bh);
537 if (!buffer_uptodate(tmp))
539 if (buffer_async_read(tmp)) {
540 BUG_ON(!buffer_locked(tmp));
543 tmp = tmp->b_this_page;
545 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
546 local_irq_restore(flags);
549 * If none of the buffers had errors and they are all
550 * uptodate then we can set the page uptodate.
552 if (page_uptodate && !PageError(page))
553 SetPageUptodate(page);
558 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
559 local_irq_restore(flags);
564 * Completion handler for block_write_full_page() - pages which are unlocked
565 * during I/O, and which have PageWriteback cleared upon I/O completion.
567 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
569 char b[BDEVNAME_SIZE];
571 struct buffer_head *first;
572 struct buffer_head *tmp;
575 BUG_ON(!buffer_async_write(bh));
579 set_buffer_uptodate(bh);
581 if (printk_ratelimit()) {
583 printk(KERN_WARNING "lost page write due to "
585 bdevname(bh->b_bdev, b));
587 set_bit(AS_EIO, &page->mapping->flags);
588 clear_buffer_uptodate(bh);
592 first = page_buffers(page);
593 local_irq_save(flags);
594 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
596 clear_buffer_async_write(bh);
598 tmp = bh->b_this_page;
600 if (buffer_async_write(tmp)) {
601 BUG_ON(!buffer_locked(tmp));
604 tmp = tmp->b_this_page;
606 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
607 local_irq_restore(flags);
608 end_page_writeback(page);
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
618 * If a page's buffers are under async read-in (end_buffer_async_read
619 * completion) then there is a possibility that another thread of
620 * control could lock one of the buffers after it has completed
621 * but while some of the other buffers have not completed. This
622 * locked buffer would confuse end_buffer_async_read() into not unlocking
623 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
624 * that this buffer is not under async I/O.
626 * The page comes unlocked when it has no locked buffer_async buffers
629 * PageLocked prevents anyone starting new async I/O reads any of
632 * PageWriteback is used to prevent simultaneous writeout of the same
635 * PageLocked prevents anyone from starting writeback of a page which is
636 * under read I/O (PageWriteback is only ever set against a locked page).
638 static void mark_buffer_async_read(struct buffer_head *bh)
640 bh->b_end_io = end_buffer_async_read;
641 set_buffer_async_read(bh);
644 void mark_buffer_async_write(struct buffer_head *bh)
646 bh->b_end_io = end_buffer_async_write;
647 set_buffer_async_write(bh);
649 EXPORT_SYMBOL(mark_buffer_async_write);
653 * fs/buffer.c contains helper functions for buffer-backed address space's
654 * fsync functions. A common requirement for buffer-based filesystems is
655 * that certain data from the backing blockdev needs to be written out for
656 * a successful fsync(). For example, ext2 indirect blocks need to be
657 * written back and waited upon before fsync() returns.
659 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
660 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
661 * management of a list of dependent buffers at ->i_mapping->private_list.
663 * Locking is a little subtle: try_to_free_buffers() will remove buffers
664 * from their controlling inode's queue when they are being freed. But
665 * try_to_free_buffers() will be operating against the *blockdev* mapping
666 * at the time, not against the S_ISREG file which depends on those buffers.
667 * So the locking for private_list is via the private_lock in the address_space
668 * which backs the buffers. Which is different from the address_space
669 * against which the buffers are listed. So for a particular address_space,
670 * mapping->private_lock does *not* protect mapping->private_list! In fact,
671 * mapping->private_list will always be protected by the backing blockdev's
674 * Which introduces a requirement: all buffers on an address_space's
675 * ->private_list must be from the same address_space: the blockdev's.
677 * address_spaces which do not place buffers at ->private_list via these
678 * utility functions are free to use private_lock and private_list for
679 * whatever they want. The only requirement is that list_empty(private_list)
680 * be true at clear_inode() time.
682 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
683 * filesystems should do that. invalidate_inode_buffers() should just go
684 * BUG_ON(!list_empty).
686 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
687 * take an address_space, not an inode. And it should be called
688 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
691 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
692 * list if it is already on a list. Because if the buffer is on a list,
693 * it *must* already be on the right one. If not, the filesystem is being
694 * silly. This will save a ton of locking. But first we have to ensure
695 * that buffers are taken *off* the old inode's list when they are freed
696 * (presumably in truncate). That requires careful auditing of all
697 * filesystems (do it inside bforget()). It could also be done by bringing
702 * The buffer's backing address_space's private_lock must be held
704 static inline void __remove_assoc_queue(struct buffer_head *bh)
706 list_del_init(&bh->b_assoc_buffers);
709 int inode_has_buffers(struct inode *inode)
711 return !list_empty(&inode->i_data.private_list);
715 * osync is designed to support O_SYNC io. It waits synchronously for
716 * all already-submitted IO to complete, but does not queue any new
717 * writes to the disk.
719 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
720 * you dirty the buffers, and then use osync_inode_buffers to wait for
721 * completion. Any other dirty buffers which are not yet queued for
722 * write will not be flushed to disk by the osync.
724 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
726 struct buffer_head *bh;
732 list_for_each_prev(p, list) {
734 if (buffer_locked(bh)) {
738 if (!buffer_uptodate(bh))
750 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
752 * @mapping: the mapping which wants those buffers written
754 * Starts I/O against the buffers at mapping->private_list, and waits upon
757 * Basically, this is a convenience function for fsync().
758 * @mapping is a file or directory which needs those buffers to be written for
759 * a successful fsync().
761 int sync_mapping_buffers(struct address_space *mapping)
763 struct address_space *buffer_mapping = mapping->assoc_mapping;
765 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
768 return fsync_buffers_list(&buffer_mapping->private_lock,
769 &mapping->private_list);
771 EXPORT_SYMBOL(sync_mapping_buffers);
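/*
 * Illustrative sketch of the fsync() use described above: a buffer-backed
 * filesystem writes its "associated" metadata buffers and then the inode
 * itself.  The function name and the use of write_inode_now() are
 * illustrative assumptions; real filesystems also honour the datasync flag
 * and the inode's dirty state.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int ret, err;

	ret = sync_mapping_buffers(inode->i_mapping);	/* ->private_list */
	err = write_inode_now(inode, 1);		/* the inode itself */
	if (!ret)
		ret = err;
	return ret;
}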
774 * Called when we've recently written block `bblock', and it is known that
775 * `bblock' was for a buffer_boundary() buffer. This means that the block at
776 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
777 * dirty, schedule it for IO. So that indirects merge nicely with their data.
779 void write_boundary_block(struct block_device *bdev,
780 sector_t bblock, unsigned blocksize)
782 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
784 if (buffer_dirty(bh))
785 ll_rw_block(WRITE, 1, &bh);
790 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
792 struct address_space *mapping = inode->i_mapping;
793 struct address_space *buffer_mapping = bh->b_page->mapping;
795 mark_buffer_dirty(bh);
796 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping;
799 if (mapping->assoc_mapping != buffer_mapping)
802 if (list_empty(&bh->b_assoc_buffers)) {
803 spin_lock(&buffer_mapping->private_lock);
804 list_move_tail(&bh->b_assoc_buffers,
805 &mapping->private_list);
806 spin_unlock(&buffer_mapping->private_lock);
809 EXPORT_SYMBOL(mark_buffer_dirty_inode);
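/*
 * Illustrative sketch: after modifying an indirect (or other dependent
 * metadata) block for a regular file, filing the buffer against the inode
 * makes a later fsync() -> sync_mapping_buffers() write it out and wait on
 * it.  The function name is an assumption for illustration only.
 */
static void example_file_indirect(struct buffer_head *bh, struct inode *inode)
{
	/* ... update the block pointers in bh->b_data ... */
	mark_buffer_dirty_inode(bh, inode);
}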
812 * Add a page to the dirty page list.
814 * It is a sad fact of life that this function is called from several places
815 * deeply under spinlocking. It may not sleep.
817 * If the page has buffers, the uptodate buffers are set dirty, to preserve
818 * dirty-state coherency between the page and the buffers. If the page does
819 * not have buffers then when they are later attached they will all be set
822 * The buffers are dirtied before the page is dirtied. There's a small race
823 * window in which a writepage caller may see the page cleanness but not the
824 * buffer dirtiness. That's fine. If this code were to set the page dirty
825 * before the buffers, a concurrent writepage caller could clear the page dirty
826 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
827 * page on the dirty page list.
829 * We use private_lock to lock against try_to_free_buffers while using the
830 * page's buffer list. Also use this to protect against clean buffers being
831 * added to the page after it was set dirty.
833 * FIXME: may need to call ->reservepage here as well. That's rather up to the
834 * address_space though.
836 int __set_page_dirty_buffers(struct page *page)
838 struct address_space * const mapping = page->mapping;
840 spin_lock(&mapping->private_lock);
841 if (page_has_buffers(page)) {
842 struct buffer_head *head = page_buffers(page);
843 struct buffer_head *bh = head;
846 set_buffer_dirty(bh);
847 bh = bh->b_this_page;
848 } while (bh != head);
850 spin_unlock(&mapping->private_lock);
852 if (!TestSetPageDirty(page)) {
853 write_lock_irq(&mapping->tree_lock);
854 if (page->mapping) { /* Race with truncate? */
855 if (mapping_cap_account_dirty(mapping))
856 inc_page_state(nr_dirty);
857 radix_tree_tag_set(&mapping->page_tree,
859 PAGECACHE_TAG_DIRTY);
861 write_unlock_irq(&mapping->tree_lock);
862 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
867 EXPORT_SYMBOL(__set_page_dirty_buffers);
870 * Write out and wait upon a list of buffers.
872 * We have conflicting pressures: we want to make sure that all
873 * initially dirty buffers get waited on, but that any subsequently
874 * dirtied buffers don't. After all, we don't want fsync to last
875 * forever if somebody is actively writing to the file.
877 * Do this in two main stages: first we copy dirty buffers to a
878 * temporary inode list, queueing the writes as we go. Then we clean
879 * up, waiting for those writes to complete.
881 * During this second stage, any subsequent updates to the file may end
882 * up refiling the buffer on the original inode's dirty list again, so
883 * there is a chance we will end up with a buffer queued for write but
884 * not yet completed on that list. So, as a final cleanup we go through
885 * the osync code to catch these locked, dirty buffers without requeuing
886 * any newly dirty buffers for write.
888 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
890 struct buffer_head *bh;
891 struct list_head tmp;
894 INIT_LIST_HEAD(&tmp);
897 while (!list_empty(list)) {
898 bh = BH_ENTRY(list->next);
899 list_del_init(&bh->b_assoc_buffers);
900 if (buffer_dirty(bh) || buffer_locked(bh)) {
901 list_add(&bh->b_assoc_buffers, &tmp);
902 if (buffer_dirty(bh)) {
906 * Ensure any pending I/O completes so that
907 * ll_rw_block() actually writes the current
908 * contents - it is a noop if I/O is still in
909 * flight on potentially older contents.
911 ll_rw_block(SWRITE, 1, &bh);
918 while (!list_empty(&tmp)) {
919 bh = BH_ENTRY(tmp.prev);
920 __remove_assoc_queue(bh);
924 if (!buffer_uptodate(bh))
931 err2 = osync_buffers_list(lock, list);
939 * Invalidate any and all dirty buffers on a given inode. We are
940 * probably unmounting the fs, but that doesn't mean we have already
941 * done a sync(). Just drop the buffers from the inode list.
943 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
944 * assumes that all the buffers are against the blockdev. Not true
947 void invalidate_inode_buffers(struct inode *inode)
949 if (inode_has_buffers(inode)) {
950 struct address_space *mapping = &inode->i_data;
951 struct list_head *list = &mapping->private_list;
952 struct address_space *buffer_mapping = mapping->assoc_mapping;
954 spin_lock(&buffer_mapping->private_lock);
955 while (!list_empty(list))
956 __remove_assoc_queue(BH_ENTRY(list->next));
957 spin_unlock(&buffer_mapping->private_lock);
962 * Remove any clean buffers from the inode's buffer list. This is called
963 * when we're trying to free the inode itself. Those buffers can pin it.
965 * Returns true if all buffers were removed.
967 int remove_inode_buffers(struct inode *inode)
971 if (inode_has_buffers(inode)) {
972 struct address_space *mapping = &inode->i_data;
973 struct list_head *list = &mapping->private_list;
974 struct address_space *buffer_mapping = mapping->assoc_mapping;
976 spin_lock(&buffer_mapping->private_lock);
977 while (!list_empty(list)) {
978 struct buffer_head *bh = BH_ENTRY(list->next);
979 if (buffer_dirty(bh)) {
983 __remove_assoc_queue(bh);
985 spin_unlock(&buffer_mapping->private_lock);
991 * Create the appropriate buffers when given a page for data area and
992 * the size of each buffer.. Use the bh->b_this_page linked list to
993 * follow the buffers created. Return NULL if unable to create more
996 * The retry flag is used to differentiate async IO (paging, swapping)
997 * which may not fail from ordinary buffer allocations.
999 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1002 struct buffer_head *bh, *head;
1008 while ((offset -= size) >= 0) {
1009 bh = alloc_buffer_head(GFP_NOFS);
1014 bh->b_this_page = head;
1019 atomic_set(&bh->b_count, 0);
1020 bh->b_private = NULL;
1023 /* Link the buffer to its page */
1024 set_bh_page(bh, page, offset);
1026 init_buffer(bh, NULL, NULL);
1030 * In case anything failed, we just free everything we got.
1036 head = head->b_this_page;
1037 free_buffer_head(bh);
1042 * Return failure for non-async IO requests. Async IO requests
1043 * are not allowed to fail, so we have to wait until buffer heads
1044 * become available. But we don't want tasks sleeping with
1045 * partially complete buffers, so all were released above.
1050 /* We're _really_ low on memory. Now we just
1051 * wait for old buffer heads to become free due to
1052 * finishing IO. Since this is an async request and
1053 * the reserve list is empty, we're sure there are
1054 * async buffer heads in use.
1059 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1062 link_dev_buffers(struct page *page, struct buffer_head *head)
1064 struct buffer_head *bh, *tail;
1069 bh = bh->b_this_page;
1071 tail->b_this_page = head;
1072 attach_page_buffers(page, head);
1076 * Initialise the state of a blockdev page's buffers.
1079 init_page_buffers(struct page *page, struct block_device *bdev,
1080 sector_t block, int size)
1082 struct buffer_head *head = page_buffers(page);
1083 struct buffer_head *bh = head;
1084 int uptodate = PageUptodate(page);
1087 if (!buffer_mapped(bh)) {
1088 init_buffer(bh, NULL, NULL);
1090 bh->b_blocknr = block;
1092 set_buffer_uptodate(bh);
1093 set_buffer_mapped(bh);
1096 bh = bh->b_this_page;
1097 } while (bh != head);
1101 * Create the page-cache page that contains the requested block.
1103 * This is used purely for blockdev mappings.
1105 static struct page *
1106 grow_dev_page(struct block_device *bdev, sector_t block,
1107 pgoff_t index, int size)
1109 struct inode *inode = bdev->bd_inode;
1111 struct buffer_head *bh;
1113 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1117 if (!PageLocked(page))
1120 if (page_has_buffers(page)) {
1121 bh = page_buffers(page);
1122 if (bh->b_size == size) {
1123 init_page_buffers(page, bdev, block, size);
1126 if (!try_to_free_buffers(page))
1131 * Allocate some buffers for this page
1133 bh = alloc_page_buffers(page, size, 0);
1138 * Link the page to the buffers and initialise them. Take the
1139 * lock to be atomic wrt __find_get_block(), which does not
1140 * run under the page lock.
1142 spin_lock(&inode->i_mapping->private_lock);
1143 link_dev_buffers(page, bh);
1144 init_page_buffers(page, bdev, block, size);
1145 spin_unlock(&inode->i_mapping->private_lock);
1151 page_cache_release(page);
1156 * Create buffers for the specified block device block's page. If
1157 * that page was dirty, the buffers are set dirty also.
1159 * Except that's a bug. Attaching dirty buffers to a dirty
1160 * blockdev's page can result in filesystem corruption, because
1161 * some of those buffers may be aliases of filesystem data.
1162 * grow_dev_page() will go BUG() if this happens.
1165 grow_buffers(struct block_device *bdev, sector_t block, int size)
1174 } while ((size << sizebits) < PAGE_SIZE);
1176 index = block >> sizebits;
1177 block = index << sizebits;
1179 /* Create a page with the proper size buffers.. */
1180 page = grow_dev_page(bdev, block, index, size);
1184 page_cache_release(page);
1188 static struct buffer_head *
1189 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1191 /* Size must be multiple of hard sectorsize */
1192 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1193 (size < 512 || size > PAGE_SIZE))) {
1194 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1196 printk(KERN_ERR "hardsect size: %d\n",
1197 bdev_hardsect_size(bdev));
1204 struct buffer_head * bh;
1206 bh = __find_get_block(bdev, block, size);
1210 if (!grow_buffers(bdev, block, size))
1216 * The relationship between dirty buffers and dirty pages:
1218 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1219 * the page is tagged dirty in its radix tree.
1221 * At all times, the dirtiness of the buffers represents the dirtiness of
1222 * subsections of the page. If the page has buffers, the page dirty bit is
1223 * merely a hint about the true dirty state.
1225 * When a page is set dirty in its entirety, all its buffers are marked dirty
1226 * (if the page has buffers).
1228 * When a buffer is marked dirty, its page is dirtied, but the page's other
1231 * Also. When blockdev buffers are explicitly read with bread(), they
1232 * individually become uptodate. But their backing page remains not
1233 * uptodate - even if all of its buffers are uptodate. A subsequent
1234 * block_read_full_page() against that page will discover all the uptodate
1235 * buffers, will set the page uptodate and will perform no I/O.
1239 * mark_buffer_dirty - mark a buffer_head as needing writeout
1240 * @bh: the buffer_head to mark dirty
1242 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1243 * backing page dirty, then tag the page as dirty in its address_space's radix
1244 * tree and then attach the address_space's inode to its superblock's dirty
1247 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1248 * mapping->tree_lock and the global inode_lock.
1250 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1252 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1253 __set_page_dirty_nobuffers(bh->b_page);
1257 * Decrement a buffer_head's reference count. If all buffers against a page
1258 * have zero reference count, are clean and unlocked, and if the page is clean
1259 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1260 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1261 * a page but it ends up not being freed, and buffers may later be reattached).
1263 void __brelse(struct buffer_head * buf)
1265 if (atomic_read(&buf->b_count)) {
1269 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1274 * bforget() is like brelse(), except it discards any
1275 * potentially dirty data.
1277 void __bforget(struct buffer_head *bh)
1279 clear_buffer_dirty(bh);
1280 if (!list_empty(&bh->b_assoc_buffers)) {
1281 struct address_space *buffer_mapping = bh->b_page->mapping;
1283 spin_lock(&buffer_mapping->private_lock);
1284 list_del_init(&bh->b_assoc_buffers);
1285 spin_unlock(&buffer_mapping->private_lock);
1290 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1293 if (buffer_uptodate(bh)) {
1298 bh->b_end_io = end_buffer_read_sync;
1299 submit_bh(READ, bh);
1301 if (buffer_uptodate(bh))
1309 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1310 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1311 * refcount elevated by one when they're in an LRU. A buffer can only appear
1312 * once in a particular CPU's LRU. A single buffer can be present in multiple
1313 * CPU's LRUs at the same time.
1315 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1316 * sb_find_get_block().
1318 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1319 * a local interrupt disable for that.
1322 #define BH_LRU_SIZE 8
1325 struct buffer_head *bhs[BH_LRU_SIZE];
1328 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1331 #define bh_lru_lock() local_irq_disable()
1332 #define bh_lru_unlock() local_irq_enable()
1334 #define bh_lru_lock() preempt_disable()
1335 #define bh_lru_unlock() preempt_enable()
1338 static inline void check_irqs_on(void)
1340 #ifdef irqs_disabled
1341 BUG_ON(irqs_disabled());
1346 * The LRU management algorithm is dopey-but-simple. Sorry.
1348 static void bh_lru_install(struct buffer_head *bh)
1350 struct buffer_head *evictee = NULL;
1355 lru = &__get_cpu_var(bh_lrus);
1356 if (lru->bhs[0] != bh) {
1357 struct buffer_head *bhs[BH_LRU_SIZE];
1363 for (in = 0; in < BH_LRU_SIZE; in++) {
1364 struct buffer_head *bh2 = lru->bhs[in];
1369 if (out >= BH_LRU_SIZE) {
1370 BUG_ON(evictee != NULL);
1377 while (out < BH_LRU_SIZE)
1379 memcpy(lru->bhs, bhs, sizeof(bhs));
1388 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1390 static struct buffer_head *
1391 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1393 struct buffer_head *ret = NULL;
1399 lru = &__get_cpu_var(bh_lrus);
1400 for (i = 0; i < BH_LRU_SIZE; i++) {
1401 struct buffer_head *bh = lru->bhs[i];
1403 if (bh && bh->b_bdev == bdev &&
1404 bh->b_blocknr == block && bh->b_size == size) {
1407 lru->bhs[i] = lru->bhs[i - 1];
1422 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1423 * it in the LRU and mark it as accessed. If it is not present then return
1426 struct buffer_head *
1427 __find_get_block(struct block_device *bdev, sector_t block, int size)
1429 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1432 bh = __find_get_block_slow(bdev, block);
1440 EXPORT_SYMBOL(__find_get_block);
1443 * __getblk will locate (and, if necessary, create) the buffer_head
1444 * which corresponds to the passed block_device, block and size. The
1445 * returned buffer has its reference count incremented.
1447 * __getblk() cannot fail - it just keeps trying. If you pass it an
1448 * illegal block number, __getblk() will happily return a buffer_head
1449 * which represents the non-existent block. Very weird.
1451 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1452 * attempt is failing. FIXME, perhaps?
1454 struct buffer_head *
1455 __getblk(struct block_device *bdev, sector_t block, int size)
1457 struct buffer_head *bh = __find_get_block(bdev, block, size);
1461 bh = __getblk_slow(bdev, block, size);
1464 EXPORT_SYMBOL(__getblk);
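/*
 * Illustrative sketch of a full-block overwrite via __getblk(): because the
 * whole block is rewritten there is no need to read it first.  The function
 * name is an assumption for illustration only.
 */
static void example_zero_block(struct block_device *bdev, sector_t block,
			       int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, size);	/* rewrite the whole block */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* writeback will pick it up later */
	brelse(bh);
}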
1467 * Do async read-ahead on a buffer..
1469 void __breadahead(struct block_device *bdev, sector_t block, int size)
1471 struct buffer_head *bh = __getblk(bdev, block, size);
1473 ll_rw_block(READA, 1, &bh);
1477 EXPORT_SYMBOL(__breadahead);
1480 * __bread() - reads a specified block and returns the bh
1481 * @bdev: the block_device to read from
1482 * @block: number of block
1483 * @size: size (in bytes) to read
1485 * Reads a specified block, and returns buffer head that contains it.
1486 * It returns NULL if the block was unreadable.
1488 struct buffer_head *
1489 __bread(struct block_device *bdev, sector_t block, int size)
1491 struct buffer_head *bh = __getblk(bdev, block, size);
1493 if (likely(bh) && !buffer_uptodate(bh))
1494 bh = __bread_slow(bh);
1497 EXPORT_SYMBOL(__bread);
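/*
 * Illustrative sketch of the checked-read pattern for __bread(): a NULL
 * return means the block could not be read.  The function name is an
 * assumption for illustration only.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... examine bh->b_data ... */
	brelse(bh);
	return 0;
}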
1500 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1501 * This doesn't race because it runs in each cpu either in irq
1502 * or with preempt disabled.
1504 static void invalidate_bh_lru(void *arg)
1506 struct bh_lru *b = &get_cpu_var(bh_lrus);
1509 for (i = 0; i < BH_LRU_SIZE; i++) {
1513 put_cpu_var(bh_lrus);
1516 static void invalidate_bh_lrus(void)
1518 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1521 void set_bh_page(struct buffer_head *bh,
1522 struct page *page, unsigned long offset)
1525 if (offset >= PAGE_SIZE)
1527 if (PageHighMem(page))
1529 * This catches illegal uses and preserves the offset:
1531 bh->b_data = (char *)(0 + offset);
1533 bh->b_data = page_address(page) + offset;
1535 EXPORT_SYMBOL(set_bh_page);
1538 * Called when truncating a buffer on a page completely.
1540 static void discard_buffer(struct buffer_head * bh)
1543 clear_buffer_dirty(bh);
1545 clear_buffer_mapped(bh);
1546 clear_buffer_req(bh);
1547 clear_buffer_new(bh);
1548 clear_buffer_delay(bh);
1553 * try_to_release_page() - release old fs-specific metadata on a page
1555 * @page: the page which the kernel is trying to free
1556 * @gfp_mask: memory allocation flags (and I/O mode)
1558 * The address_space is to try to release any data against the page
1559 * (presumably at page->private). If the release was successful, return `1'.
1560 * Otherwise return zero.
1562 * The @gfp_mask argument specifies whether I/O may be performed to release
1563 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1565 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1567 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1569 struct address_space * const mapping = page->mapping;
1571 BUG_ON(!PageLocked(page));
1572 if (PageWriteback(page))
1575 if (mapping && mapping->a_ops->releasepage)
1576 return mapping->a_ops->releasepage(page, gfp_mask);
1577 return try_to_free_buffers(page);
1579 EXPORT_SYMBOL(try_to_release_page);
1582 * block_invalidatepage - invalidate part or all of a buffer-backed page
1584 * @page: the page which is affected
1585 * @offset: the index of the truncation point
1587 * block_invalidatepage() is called when all or part of the page has become
1588 * invalidated by a truncate operation.
1590 * block_invalidatepage() does not have to release all buffers, but it must
1591 * ensure that no dirty buffer is left outside @offset and that no I/O
1592 * is underway against any of the blocks which are outside the truncation
1593 * point. Because the caller is about to free (and possibly reuse) those
1596 void block_invalidatepage(struct page *page, unsigned long offset)
1598 struct buffer_head *head, *bh, *next;
1599 unsigned int curr_off = 0;
1601 BUG_ON(!PageLocked(page));
1602 if (!page_has_buffers(page))
1605 head = page_buffers(page);
1608 unsigned int next_off = curr_off + bh->b_size;
1609 next = bh->b_this_page;
1612 * is this block fully invalidated?
1614 if (offset <= curr_off)
1616 curr_off = next_off;
1618 } while (bh != head);
1621 * We release buffers only if the entire page is being invalidated.
1622 * The get_block cached value has been unconditionally invalidated,
1623 * so real IO is not possible anymore.
1626 try_to_release_page(page, 0);
1630 EXPORT_SYMBOL(block_invalidatepage);
1632 void do_invalidatepage(struct page *page, unsigned long offset)
1634 void (*invalidatepage)(struct page *, unsigned long);
1635 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1636 block_invalidatepage;
1637 (*invalidatepage)(page, offset);
1641 * We attach and possibly dirty the buffers atomically wrt
1642 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1643 * is already excluded via the page lock.
1645 void create_empty_buffers(struct page *page,
1646 unsigned long blocksize, unsigned long b_state)
1648 struct buffer_head *bh, *head, *tail;
1650 head = alloc_page_buffers(page, blocksize, 1);
1653 bh->b_state |= b_state;
1655 bh = bh->b_this_page;
1657 tail->b_this_page = head;
1659 spin_lock(&page->mapping->private_lock);
1660 if (PageUptodate(page) || PageDirty(page)) {
1663 if (PageDirty(page))
1664 set_buffer_dirty(bh);
1665 if (PageUptodate(page))
1666 set_buffer_uptodate(bh);
1667 bh = bh->b_this_page;
1668 } while (bh != head);
1670 attach_page_buffers(page, head);
1671 spin_unlock(&page->mapping->private_lock);
1673 EXPORT_SYMBOL(create_empty_buffers);
1676 * We are taking a block for data and we don't want any output from any
1677 * buffer-cache aliases starting from return from that function and
1678 * until the moment when something will explicitly mark the buffer
1679 * dirty (hopefully that will not happen until we will free that block ;-)
1680 * We don't even need to mark it not-uptodate - nobody can expect
1681 * anything from a newly allocated buffer anyway. We used to use
1682 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1683 * don't want to mark the alias unmapped, for example - it would confuse
1684 * anyone who might pick it with bread() afterwards...
1686 * Also.. Note that bforget() doesn't lock the buffer. So there can
1687 * be writeout I/O going on against recently-freed buffers. We don't
1688 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1689 * only if we really need to. That happens here.
1691 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1693 struct buffer_head *old_bh;
1697 old_bh = __find_get_block_slow(bdev, block);
1699 clear_buffer_dirty(old_bh);
1700 wait_on_buffer(old_bh);
1701 clear_buffer_req(old_bh);
1705 EXPORT_SYMBOL(unmap_underlying_metadata);
1708 * NOTE! All mapped/uptodate combinations are valid:
1710 *  Mapped   Uptodate   Meaning
1712 *  No       No         "unknown" - must do get_block()
1713 *  No       Yes        "hole" - zero-filled
1714 *  Yes      No         "allocated" - allocated on disk, not read in
1715 *  Yes      Yes        "valid" - allocated and up-to-date in memory.
1717 * "Dirty" is valid only with the last case (mapped+uptodate).
1721 * While block_write_full_page is writing back the dirty buffers under
1722 * the page lock, whoever dirtied the buffers may decide to clean them
1723 * again at any time. We handle that by only looking at the buffer
1724 * state inside lock_buffer().
1726 * If block_write_full_page() is called for regular writeback
1727 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1728 * locked buffer. This can only happen if someone has written the buffer
1729 * directly, with submit_bh(). At the address_space level PageWriteback
1730 * prevents this contention from occurring.
1732 static int __block_write_full_page(struct inode *inode, struct page *page,
1733 get_block_t *get_block, struct writeback_control *wbc)
1737 sector_t last_block;
1738 struct buffer_head *bh, *head;
1739 int nr_underway = 0;
1741 BUG_ON(!PageLocked(page));
1743 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1745 if (!page_has_buffers(page)) {
1746 create_empty_buffers(page, 1 << inode->i_blkbits,
1747 (1 << BH_Dirty)|(1 << BH_Uptodate));
1751 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1752 * here, and the (potentially unmapped) buffers may become dirty at
1753 * any time. If a buffer becomes dirty here after we've inspected it
1754 * then we just miss that fact, and the page stays dirty.
1756 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1757 * handle that here by just cleaning them.
1760 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1761 head = page_buffers(page);
1765 * Get all the dirty buffers mapped to disk addresses and
1766 * handle any aliases from the underlying blockdev's mapping.
1769 if (block > last_block) {
1771 * mapped buffers outside i_size will occur, because
1772 * this page can be outside i_size when there is a
1773 * truncate in progress.
1776 * The buffer was zeroed by block_write_full_page()
1778 clear_buffer_dirty(bh);
1779 set_buffer_uptodate(bh);
1780 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1781 err = get_block(inode, block, bh, 1);
1784 if (buffer_new(bh)) {
1785 /* blockdev mappings never come here */
1786 clear_buffer_new(bh);
1787 unmap_underlying_metadata(bh->b_bdev,
1791 bh = bh->b_this_page;
1793 } while (bh != head);
1796 if (!buffer_mapped(bh))
1799 * If it's a fully non-blocking write attempt and we cannot
1800 * lock the buffer then redirty the page. Note that this can
1801 * potentially cause a busy-wait loop from pdflush and kswapd
1802 * activity, but those code paths have their own higher-level
1805 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1807 } else if (test_set_buffer_locked(bh)) {
1808 redirty_page_for_writepage(wbc, page);
1811 if (test_clear_buffer_dirty(bh)) {
1812 mark_buffer_async_write(bh);
1816 } while ((bh = bh->b_this_page) != head);
1819 * The page and its buffers are protected by PageWriteback(), so we can
1820 * drop the bh refcounts early.
1822 BUG_ON(PageWriteback(page));
1823 set_page_writeback(page);
1826 struct buffer_head *next = bh->b_this_page;
1827 if (buffer_async_write(bh)) {
1828 submit_bh(WRITE, bh);
1832 } while (bh != head);
1837 if (nr_underway == 0) {
1839 * The page was marked dirty, but the buffers were
1840 * clean. Someone wrote them back by hand with
1841 * ll_rw_block/submit_bh. A rare case.
1845 if (!buffer_uptodate(bh)) {
1849 bh = bh->b_this_page;
1850 } while (bh != head);
1852 SetPageUptodate(page);
1853 end_page_writeback(page);
1855 * The page and buffer_heads can be released at any time from
1858 wbc->pages_skipped++; /* We didn't write this page */
1864 * ENOSPC, or some other error. We may already have added some
1865 * blocks to the file, so we need to write these out to avoid
1866 * exposing stale data.
1867 * The page is currently locked and not marked for writeback
1870 /* Recovery: lock and submit the mapped buffers */
1872 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1874 mark_buffer_async_write(bh);
1877 * The buffer may have been set dirty during
1878 * attachment to a dirty page.
1880 clear_buffer_dirty(bh);
1882 } while ((bh = bh->b_this_page) != head);
1884 BUG_ON(PageWriteback(page));
1885 set_page_writeback(page);
1888 struct buffer_head *next = bh->b_this_page;
1889 if (buffer_async_write(bh)) {
1890 clear_buffer_dirty(bh);
1891 submit_bh(WRITE, bh);
1895 } while (bh != head);
1899 static int __block_prepare_write(struct inode *inode, struct page *page,
1900 unsigned from, unsigned to, get_block_t *get_block)
1902 unsigned block_start, block_end;
1905 unsigned blocksize, bbits;
1906 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1908 BUG_ON(!PageLocked(page));
1909 BUG_ON(from > PAGE_CACHE_SIZE);
1910 BUG_ON(to > PAGE_CACHE_SIZE);
1913 blocksize = 1 << inode->i_blkbits;
1914 if (!page_has_buffers(page))
1915 create_empty_buffers(page, blocksize, 0);
1916 head = page_buffers(page);
1918 bbits = inode->i_blkbits;
1919 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1921 for(bh = head, block_start = 0; bh != head || !block_start;
1922 block++, block_start=block_end, bh = bh->b_this_page) {
1923 block_end = block_start + blocksize;
1924 if (block_end <= from || block_start >= to) {
1925 if (PageUptodate(page)) {
1926 if (!buffer_uptodate(bh))
1927 set_buffer_uptodate(bh);
1932 clear_buffer_new(bh);
1933 if (!buffer_mapped(bh)) {
1934 err = get_block(inode, block, bh, 1);
1937 if (buffer_new(bh)) {
1938 unmap_underlying_metadata(bh->b_bdev,
1940 if (PageUptodate(page)) {
1941 set_buffer_uptodate(bh);
1944 if (block_end > to || block_start < from) {
1947 kaddr = kmap_atomic(page, KM_USER0);
1951 if (block_start < from)
1952 memset(kaddr+block_start,
1953 0, from-block_start);
1954 flush_dcache_page(page);
1955 kunmap_atomic(kaddr, KM_USER0);
1960 if (PageUptodate(page)) {
1961 if (!buffer_uptodate(bh))
1962 set_buffer_uptodate(bh);
1965 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1966 (block_start < from || block_end > to)) {
1967 ll_rw_block(READ, 1, &bh);
1972 * If we issued read requests - let them complete.
1974 while(wait_bh > wait) {
1975 wait_on_buffer(*--wait_bh);
1976 if (!buffer_uptodate(*wait_bh))
1983 clear_buffer_new(bh);
1984 } while ((bh = bh->b_this_page) != head);
1989 * Zero out any newly allocated blocks to avoid exposing stale
1990 * data. If BH_New is set, we know that the block was newly
1991 * allocated in the above loop.
1996 block_end = block_start+blocksize;
1997 if (block_end <= from)
1999 if (block_start >= to)
2001 if (buffer_new(bh)) {
2004 clear_buffer_new(bh);
2005 kaddr = kmap_atomic(page, KM_USER0);
2006 memset(kaddr+block_start, 0, bh->b_size);
2007 kunmap_atomic(kaddr, KM_USER0);
2008 set_buffer_uptodate(bh);
2009 mark_buffer_dirty(bh);
2012 block_start = block_end;
2013 bh = bh->b_this_page;
2014 } while (bh != head);
2018 static int __block_commit_write(struct inode *inode, struct page *page,
2019 unsigned from, unsigned to)
2021 unsigned block_start, block_end;
2024 struct buffer_head *bh, *head;
2026 blocksize = 1 << inode->i_blkbits;
2028 for(bh = head = page_buffers(page), block_start = 0;
2029 bh != head || !block_start;
2030 block_start=block_end, bh = bh->b_this_page) {
2031 block_end = block_start + blocksize;
2032 if (block_end <= from || block_start >= to) {
2033 if (!buffer_uptodate(bh))
2036 set_buffer_uptodate(bh);
2037 mark_buffer_dirty(bh);
2042 * If this is a partial write which happened to make all buffers
2043 * uptodate then we can optimize away a bogus readpage() for
2044 * the next read(). Here we 'discover' whether the page went
2045 * uptodate as a result of this (potentially partial) write.
2048 SetPageUptodate(page);
2053 * Generic "read page" function for block devices that have the normal
2054 * get_block functionality. This is most of the block device filesystems.
2055 * Reads the page asynchronously --- the unlock_buffer() and
2056 * set/clear_buffer_uptodate() functions propagate buffer state into the
2057 * page struct once IO has completed.
2059 int block_read_full_page(struct page *page, get_block_t *get_block)
2061 struct inode *inode = page->mapping->host;
2062 sector_t iblock, lblock;
2063 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2064 unsigned int blocksize;
2066 int fully_mapped = 1;
2068 BUG_ON(!PageLocked(page));
2069 blocksize = 1 << inode->i_blkbits;
2070 if (!page_has_buffers(page))
2071 create_empty_buffers(page, blocksize, 0);
2072 head = page_buffers(page);
2074 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2075 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2081 if (buffer_uptodate(bh))
2084 if (!buffer_mapped(bh)) {
2088 if (iblock < lblock) {
2089 err = get_block(inode, iblock, bh, 0);
2093 if (!buffer_mapped(bh)) {
2094 void *kaddr = kmap_atomic(page, KM_USER0);
2095 memset(kaddr + i * blocksize, 0, blocksize);
2096 flush_dcache_page(page);
2097 kunmap_atomic(kaddr, KM_USER0);
2099 set_buffer_uptodate(bh);
2103 * get_block() might have updated the buffer
2106 if (buffer_uptodate(bh))
2110 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2113 SetPageMappedToDisk(page);
2117 * All buffers are uptodate - we can set the page uptodate
2118 * as well. But not if get_block() returned an error.
2120 if (!PageError(page))
2121 SetPageUptodate(page);
2126 /* Stage two: lock the buffers */
2127 for (i = 0; i < nr; i++) {
2130 mark_buffer_async_read(bh);
2134 * Stage 3: start the IO. Check for uptodateness
2135 * inside the buffer lock in case another process reading
2136 * the underlying blockdev brought it uptodate (the sct fix).
2138 for (i = 0; i < nr; i++) {
2140 if (buffer_uptodate(bh))
2141 end_buffer_async_read(bh, 1);
2143 submit_bh(READ, bh);
2148 /* utility function for filesystems that need to do work on expanding
2149 * truncates. Uses prepare/commit_write to allow the filesystem to
2150 * deal with the hole.
2152 static int __generic_cont_expand(struct inode *inode, loff_t size,
2153 pgoff_t index, unsigned int offset)
2155 struct address_space *mapping = inode->i_mapping;
2157 unsigned long limit;
2161 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2162 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2163 send_sig(SIGXFSZ, current, 0);
2166 if (size > inode->i_sb->s_maxbytes)
2170 page = grab_cache_page(mapping, index);
2173 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2176 * ->prepare_write() may have instantiated a few blocks
2177 * outside i_size. Trim these off again.
2180 page_cache_release(page);
2181 vmtruncate(inode, inode->i_size);
2185 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2188 page_cache_release(page);
2195 int generic_cont_expand(struct inode *inode, loff_t size)
2198 unsigned int offset;
2200 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2202 /* ugh. in prepare/commit_write, if from==to==start of block, we
2203 ** skip the prepare. make sure we never send an offset for the start
2206 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2207 /* caller must handle this extra byte. */
2210 index = size >> PAGE_CACHE_SHIFT;
2212 return __generic_cont_expand(inode, size, index, offset);
2215 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2217 loff_t pos = size - 1;
2218 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2219 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2221 /* prepare/commit_write can handle even if from==to==start of block. */
2222 return __generic_cont_expand(inode, size, index, offset);
2226 * For moronic filesystems that do not allow holes in file.
2227 * We may have to extend the file.
2230 int cont_prepare_write(struct page *page, unsigned offset,
2231 unsigned to, get_block_t *get_block, loff_t *bytes)
2233 struct address_space *mapping = page->mapping;
2234 struct inode *inode = mapping->host;
2235 struct page *new_page;
2239 unsigned blocksize = 1 << inode->i_blkbits;
2242 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2244 new_page = grab_cache_page(mapping, pgpos);
2247 /* we might sleep */
2248 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2249 unlock_page(new_page);
2250 page_cache_release(new_page);
2253 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2254 if (zerofrom & (blocksize-1)) {
2255 *bytes |= (blocksize-1);
2258 status = __block_prepare_write(inode, new_page, zerofrom,
2259 PAGE_CACHE_SIZE, get_block);
2262 kaddr = kmap_atomic(new_page, KM_USER0);
2263 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2264 flush_dcache_page(new_page);
2265 kunmap_atomic(kaddr, KM_USER0);
2266 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2267 unlock_page(new_page);
2268 page_cache_release(new_page);
2271 if (page->index < pgpos) {
2272 /* completely inside the area */
2275 /* page covers the boundary, find the boundary offset */
2276 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2278 /* if we will expand the thing last block will be filled */
2279 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2280 *bytes |= (blocksize-1);
2284 /* starting below the boundary? Nothing to zero out */
2285 if (offset <= zerofrom)
2288 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2291 if (zerofrom < offset) {
2292 kaddr = kmap_atomic(page, KM_USER0);
2293 memset(kaddr+zerofrom, 0, offset-zerofrom);
2294 flush_dcache_page(page);
2295 kunmap_atomic(kaddr, KM_USER0);
2296 __block_commit_write(inode, page, zerofrom, offset);
2300 ClearPageUptodate(page);
2304 ClearPageUptodate(new_page);
2305 unlock_page(new_page);
2306 page_cache_release(new_page);
2311 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2312 get_block_t *get_block)
2314 struct inode *inode = page->mapping->host;
2315 int err = __block_prepare_write(inode, page, from, to, get_block);
2317 ClearPageUptodate(page);
2321 int block_commit_write(struct page *page, unsigned from, unsigned to)
2323 struct inode *inode = page->mapping->host;
2324 __block_commit_write(inode,page,from,to);
2328 int generic_commit_write(struct file *file, struct page *page,
2329 unsigned from, unsigned to)
2331 struct inode *inode = page->mapping->host;
2332 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2333 __block_commit_write(inode,page,from,to);
2335 * No need to use i_size_read() here; the i_size
2336 * cannot change under us because we hold i_mutex.
2338 if (pos > inode->i_size) {
2339 i_size_write(inode, pos);
2340 mark_inode_dirty(inode);
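/*
 * Hypothetical sketch of the usual pairing for a simple filesystem:
 * ->prepare_write() maps and reads the affected blocks through
 * block_prepare_write(), and generic_commit_write() serves directly as the
 * ->commit_write() hook.  "examplefs_get_block" is an assumed get_block_t.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create);

static int examplefs_prepare_write(struct file *file, struct page *page,
                                   unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_aops = {
        .prepare_write  = examplefs_prepare_write,
        .commit_write   = generic_commit_write,
};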
2347 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2348 * immediately, while under the page lock. So it needs a special end_io
2349 * handler which does not touch the bh after unlocking it.
2351 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2352 * a race there is benign: unlock_buffer() only uses the bh's address for
2353 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2356 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2359 set_buffer_uptodate(bh);
2361 /* This happens, due to failed READA attempts. */
2362 clear_buffer_uptodate(bh);
2368 * On entry, the page is fully not uptodate.
2369 * On exit the page is fully uptodate in the areas outside (from,to)
2371 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2372 get_block_t *get_block)
2374 struct inode *inode = page->mapping->host;
2375 const unsigned blkbits = inode->i_blkbits;
2376 const unsigned blocksize = 1 << blkbits;
2377 struct buffer_head map_bh;
2378 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2379 unsigned block_in_page;
2380 unsigned block_start;
2381 sector_t block_in_file;
2386 int is_mapped_to_disk = 1;
2389 if (PageMappedToDisk(page))
2392 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2393 map_bh.b_page = page;
2396 * We loop across all blocks in the page, whether or not they are
2397 * part of the affected region. This is so we can discover if the
2398 * page is fully mapped-to-disk.
2400 for (block_start = 0, block_in_page = 0;
2401 block_start < PAGE_CACHE_SIZE;
2402 block_in_page++, block_start += blocksize) {
2403 unsigned block_end = block_start + blocksize;
2408 if (block_start >= to)
2410 ret = get_block(inode, block_in_file + block_in_page,
2414 if (!buffer_mapped(&map_bh))
2415 is_mapped_to_disk = 0;
2416 if (buffer_new(&map_bh))
2417 unmap_underlying_metadata(map_bh.b_bdev,
2419 if (PageUptodate(page))
2421 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2422 kaddr = kmap_atomic(page, KM_USER0);
2423 if (block_start < from) {
2424 memset(kaddr+block_start, 0, from-block_start);
2427 if (block_end > to) {
2428 memset(kaddr + to, 0, block_end - to);
2431 flush_dcache_page(page);
2432 kunmap_atomic(kaddr, KM_USER0);
2435 if (buffer_uptodate(&map_bh))
2436 continue; /* reiserfs does this */
2437 if (block_start < from || block_end > to) {
2438 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2444 bh->b_state = map_bh.b_state;
2445 atomic_set(&bh->b_count, 0);
2446 bh->b_this_page = NULL;
2448 bh->b_blocknr = map_bh.b_blocknr;
2449 bh->b_size = blocksize;
2450 bh->b_data = (char *)(long)block_start;
2451 bh->b_bdev = map_bh.b_bdev;
2452 bh->b_private = NULL;
2453 read_bh[nr_reads++] = bh;
2458 struct buffer_head *bh;
2461 * The page is locked, so these buffers are protected from
2462 * any VM or truncate activity. Hence we don't need to care
2463 * for the buffer_head refcounts.
2465 for (i = 0; i < nr_reads; i++) {
2468 bh->b_end_io = end_buffer_read_nobh;
2469 submit_bh(READ, bh);
2471 for (i = 0; i < nr_reads; i++) {
2474 if (!buffer_uptodate(bh))
2476 free_buffer_head(bh);
2483 if (is_mapped_to_disk)
2484 SetPageMappedToDisk(page);
2485 SetPageUptodate(page);
2488 * Setting the page dirty here isn't necessary for the prepare_write
2489 * function - commit_write will do that. But if/when this function is
2490 * used within the pagefault handler to ensure that all mmapped pages
2491 * have backing space in the filesystem, we will need to dirty the page
2492 * if its contents were altered.
2495 set_page_dirty(page);
2500 for (i = 0; i < nr_reads; i++) {
2502 free_buffer_head(read_bh[i]);
2506 * Error recovery is pretty slack. Clear the page and mark it dirty
2507 * so we'll later zero out any blocks which _were_ allocated.
2509 kaddr = kmap_atomic(page, KM_USER0);
2510 memset(kaddr, 0, PAGE_CACHE_SIZE);
2511 kunmap_atomic(kaddr, KM_USER0);
2512 SetPageUptodate(page);
2513 set_page_dirty(page);
2516 EXPORT_SYMBOL(nobh_prepare_write);
2518 int nobh_commit_write(struct file *file, struct page *page,
2519 unsigned from, unsigned to)
2521 struct inode *inode = page->mapping->host;
2522 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2524 set_page_dirty(page);
2525 if (pos > inode->i_size) {
2526 i_size_write(inode, pos);
2527 mark_inode_dirty(inode);
2531 EXPORT_SYMBOL(nobh_commit_write);
2534 * nobh_writepage() - based on block_write_full_page() except
2535 * that it tries to operate without attaching bufferheads to
2538 int nobh_writepage(struct page *page, get_block_t *get_block,
2539 struct writeback_control *wbc)
2541 struct inode * const inode = page->mapping->host;
2542 loff_t i_size = i_size_read(inode);
2543 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2548 /* Is the page fully inside i_size? */
2549 if (page->index < end_index)
2552 /* Is the page fully outside i_size? (truncate in progress) */
2553 offset = i_size & (PAGE_CACHE_SIZE-1);
2554 if (page->index >= end_index+1 || !offset) {
2556 * The page may have dirty, unmapped buffers. For example,
2557 * they may have been added in ext3_writepage(). Make them
2558 * freeable here, so the page does not leak.
2561 /* Not really sure about this - do we need this ? */
2562 if (page->mapping->a_ops->invalidatepage)
2563 page->mapping->a_ops->invalidatepage(page, offset);
2566 return 0; /* don't care */
2570 * The page straddles i_size. It must be zeroed out on each and every
2571 * writepage invocation because it may be mmapped. "A file is mapped
2572 * in multiples of the page size. For a file that is not a multiple of
2573 * the page size, the remaining memory is zeroed when mapped, and
2574 * writes to that region are not written out to the file."
2576 kaddr = kmap_atomic(page, KM_USER0);
2577 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2578 flush_dcache_page(page);
2579 kunmap_atomic(kaddr, KM_USER0);
2581 ret = mpage_writepage(page, get_block, wbc);
2583 ret = __block_write_full_page(inode, page, get_block, wbc);
2586 EXPORT_SYMBOL(nobh_writepage);
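/*
 * Hypothetical sketch of a filesystem opting into the nobh variants so that
 * ordinary writes do not leave buffer_heads attached to its pages.
 * "examplefs_nobh_get_block" is an assumed get_block_t of that filesystem.
 */
static int examplefs_nobh_get_block(struct inode *inode, sector_t iblock,
                                    struct buffer_head *bh_result, int create);

static int examplefs_nobh_prepare_write(struct file *file, struct page *page,
                                        unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, examplefs_nobh_get_block);
}

static int examplefs_nobh_writepage(struct page *page,
                                    struct writeback_control *wbc)
{
        return nobh_writepage(page, examplefs_nobh_get_block, wbc);
}

static struct address_space_operations examplefs_nobh_aops = {
        .prepare_write  = examplefs_nobh_prepare_write,
        .commit_write   = nobh_commit_write,
        .writepage      = examplefs_nobh_writepage,
};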
2589 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2591 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2593 struct inode *inode = mapping->host;
2594 unsigned blocksize = 1 << inode->i_blkbits;
2595 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2596 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2599 struct address_space_operations *a_ops = mapping->a_ops;
2603 if ((offset & (blocksize - 1)) == 0)
2607 page = grab_cache_page(mapping, index);
2611 to = (offset + blocksize) & ~(blocksize - 1);
2612 ret = a_ops->prepare_write(NULL, page, offset, to);
2614 kaddr = kmap_atomic(page, KM_USER0);
2615 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2616 flush_dcache_page(page);
2617 kunmap_atomic(kaddr, KM_USER0);
2618 set_page_dirty(page);
2621 page_cache_release(page);
2625 EXPORT_SYMBOL(nobh_truncate_page);
2627 int block_truncate_page(struct address_space *mapping,
2628 loff_t from, get_block_t *get_block)
2630 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2631 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2634 unsigned length, pos;
2635 struct inode *inode = mapping->host;
2637 struct buffer_head *bh;
2641 blocksize = 1 << inode->i_blkbits;
2642 length = offset & (blocksize - 1);
2644 /* Block boundary? Nothing to do */
2648 length = blocksize - length;
2649 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2651 page = grab_cache_page(mapping, index);
2656 if (!page_has_buffers(page))
2657 create_empty_buffers(page, blocksize, 0);
2659 /* Find the buffer that contains "offset" */
2660 bh = page_buffers(page);
2662 while (offset >= pos) {
2663 bh = bh->b_this_page;
2669 if (!buffer_mapped(bh)) {
2670 err = get_block(inode, iblock, bh, 0);
2673 /* unmapped? It's a hole - nothing to do */
2674 if (!buffer_mapped(bh))
2678 /* Ok, it's mapped. Make sure it's up-to-date */
2679 if (PageUptodate(page))
2680 set_buffer_uptodate(bh);
2682 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2684 ll_rw_block(READ, 1, &bh);
2686 /* Uhhuh. Read error. Complain and punt. */
2687 if (!buffer_uptodate(bh))
2691 kaddr = kmap_atomic(page, KM_USER0);
2692 memset(kaddr + offset, 0, length);
2693 flush_dcache_page(page);
2694 kunmap_atomic(kaddr, KM_USER0);
2696 mark_buffer_dirty(bh);
2701 page_cache_release(page);
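/*
 * Hypothetical sketch of the usual ->truncate() tail: once i_size has been
 * reduced, zero the remainder of the final block so a later extension does
 * not expose stale data.  Reuses the assumed examplefs_get_block above.
 */
static void examplefs_truncate(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size,
                            examplefs_get_block);
        /* filesystem-specific freeing of the truncated blocks would follow */
}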
2707 * The generic ->writepage function for buffer-backed address_spaces
2709 int block_write_full_page(struct page *page, get_block_t *get_block,
2710 struct writeback_control *wbc)
2712 struct inode * const inode = page->mapping->host;
2713 loff_t i_size = i_size_read(inode);
2714 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2718 /* Is the page fully inside i_size? */
2719 if (page->index < end_index)
2720 return __block_write_full_page(inode, page, get_block, wbc);
2722 /* Is the page fully outside i_size? (truncate in progress) */
2723 offset = i_size & (PAGE_CACHE_SIZE-1);
2724 if (page->index >= end_index+1 || !offset) {
2726 * The page may have dirty, unmapped buffers. For example,
2727 * they may have been added in ext3_writepage(). Make them
2728 * freeable here, so the page does not leak.
2730 do_invalidatepage(page, 0);
2732 return 0; /* don't care */
2736 * The page straddles i_size. It must be zeroed out on each and every
2737 * writepage invocation because it may be mmapped. "A file is mapped
2738 * in multiples of the page size. For a file that is not a multiple of
2739 * the page size, the remaining memory is zeroed when mapped, and
2740 * writes to that region are not written out to the file."
2742 kaddr = kmap_atomic(page, KM_USER0);
2743 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2744 flush_dcache_page(page);
2745 kunmap_atomic(kaddr, KM_USER0);
2746 return __block_write_full_page(inode, page, get_block, wbc);
2749 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2750 get_block_t *get_block)
2752 struct buffer_head tmp;
2753 struct inode *inode = mapping->host;
2756 get_block(inode, block, &tmp, 0);
2757 return tmp.b_blocknr;
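/*
 * Hypothetical sketch of the remaining buffer-backed hooks for the same
 * assumed filesystem: ->writepage and ->bmap are thin wrappers that only
 * supply the get_block callback.
 */
static int examplefs_writepage(struct page *page,
                               struct writeback_control *wbc)
{
        return block_write_full_page(page, examplefs_get_block, wbc);
}

static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, examplefs_get_block);
}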
2760 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2762 struct buffer_head *bh = bio->bi_private;
2767 if (err == -EOPNOTSUPP) {
2768 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2769 set_bit(BH_Eopnotsupp, &bh->b_state);
2772 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2777 int submit_bh(int rw, struct buffer_head * bh)
2782 BUG_ON(!buffer_locked(bh));
2783 BUG_ON(!buffer_mapped(bh));
2784 BUG_ON(!bh->b_end_io);
2786 if (buffer_ordered(bh) && (rw == WRITE))
2790 * Only clear out a write error when rewriting; should this
2791 * include WRITE_SYNC as well?
2793 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2794 clear_buffer_write_io_error(bh);
2797 * from here on down, it's all bio -- do the initial mapping,
2798 * submit_bio -> generic_make_request may further map this bio around
2800 bio = bio_alloc(GFP_NOIO, 1);
2802 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2803 bio->bi_bdev = bh->b_bdev;
2804 bio->bi_io_vec[0].bv_page = bh->b_page;
2805 bio->bi_io_vec[0].bv_len = bh->b_size;
2806 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2810 bio->bi_size = bh->b_size;
2812 bio->bi_end_io = end_bio_bh_io_sync;
2813 bio->bi_private = bh;
2816 submit_bio(rw, bio);
2818 if (bio_flagged(bio, BIO_EOPNOTSUPP))
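/*
 * Hypothetical sketch of driving a single locked buffer through submit_bh()
 * directly, mirroring what ll_rw_block() does for one READ: take a reference
 * (end_buffer_read_sync() drops it again with put_bh()), then submit.
 */
static void example_submit_read(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);              /* nothing to read */
                return;
        }
        get_bh(bh);                             /* matched by put_bh() in end_io */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);                     /* caller wants the data now */
}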
2826 * ll_rw_block: low-level access to block devices (DEPRECATED)
2827 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2828 * @nr: number of &struct buffer_heads in the array
2829 * @bhs: array of pointers to &struct buffer_head
2831 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2832 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2833 * %SWRITE is like %WRITE, except that we make sure the *current* data in buffers
2834 * are sent to disk. The fourth %READA option is described in the documentation
2835 * for generic_make_request() which ll_rw_block() calls.
2837 * This function drops any buffer that it cannot get a lock on (with the
2838 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2839 * clean when doing a write request, and any buffer that appears to be
2840 * up-to-date when doing a read request. Further it marks as clean buffers that
2841 * are processed for writing (the buffer cache won't assume that they are
2842 * actually clean until the buffer gets unlocked).
2844 * ll_rw_block sets b_end_io to a simple completion handler that marks
2845 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2848 * All of the buffers must be for the same device, and must also be a
2849 * multiple of the current approved size for the device.
2851 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2855 for (i = 0; i < nr; i++) {
2856 struct buffer_head *bh = bhs[i];
2860 else if (test_set_buffer_locked(bh))
2863 if (rw == WRITE || rw == SWRITE) {
2864 if (test_clear_buffer_dirty(bh)) {
2865 bh->b_end_io = end_buffer_write_sync;
2867 submit_bh(WRITE, bh);
2871 if (!buffer_uptodate(bh)) {
2872 bh->b_end_io = end_buffer_read_sync;
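/*
 * Hypothetical usage sketch: read one filesystem block synchronously, the
 * way the sb_bread()-style helpers do it.  "sb" and "blocknr" are assumed
 * to come from the caller.
 */
static struct buffer_head *example_read_block(struct super_block *sb,
                                              sector_t blocknr)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (bh && !buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);      /* queue the read */
                wait_on_buffer(bh);             /* wait for completion */
                if (!buffer_uptodate(bh)) {     /* I/O error */
                        brelse(bh);
                        return NULL;
                }
        }
        return bh;
}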
2883 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2884 * and then start new I/O and then wait upon it. The caller must have a ref on
2887 int sync_dirty_buffer(struct buffer_head *bh)
2891 WARN_ON(atomic_read(&bh->b_count) < 1);
2893 if (test_clear_buffer_dirty(bh)) {
2895 bh->b_end_io = end_buffer_write_sync;
2896 ret = submit_bh(WRITE, bh);
2898 if (buffer_eopnotsupp(bh)) {
2899 clear_buffer_eopnotsupp(bh);
2902 if (!ret && !buffer_uptodate(bh))
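/*
 * Hypothetical usage sketch: a caller that has just modified a metadata
 * buffer and needs it on stable storage before it can proceed.
 */
static int example_flush_block(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);          /* record the modification */
        return sync_dirty_buffer(bh);   /* write it out and wait */
}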
2911 * try_to_free_buffers() checks if all the buffers on this particular page
2912 * are unused, and releases them if so.
2914 * Exclusion against try_to_free_buffers may be obtained by either
2915 * locking the page or by holding its mapping's private_lock.
2917 * If the page is dirty but all the buffers are clean then we need to
2918 * be sure to mark the page clean as well. This is because the page
2919 * may be against a block device, and a later reattachment of buffers
2920 * to a dirty page will set *all* buffers dirty, which would corrupt
2921 * filesystem data on the same device.
2923 * The same applies to regular filesystem pages: if all the buffers are
2924 * clean then we set the page clean and proceed. To do that, we require
2925 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2928 * try_to_free_buffers() is non-blocking.
2930 static inline int buffer_busy(struct buffer_head *bh)
2932 return atomic_read(&bh->b_count) |
2933 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2937 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2939 struct buffer_head *head = page_buffers(page);
2940 struct buffer_head *bh;
2944 if (buffer_write_io_error(bh) && page->mapping)
2945 set_bit(AS_EIO, &page->mapping->flags);
2946 if (buffer_busy(bh))
2948 bh = bh->b_this_page;
2949 } while (bh != head);
2952 struct buffer_head *next = bh->b_this_page;
2954 if (!list_empty(&bh->b_assoc_buffers))
2955 __remove_assoc_queue(bh);
2957 } while (bh != head);
2958 *buffers_to_free = head;
2959 __clear_page_buffers(page);
2965 int try_to_free_buffers(struct page *page)
2967 struct address_space * const mapping = page->mapping;
2968 struct buffer_head *buffers_to_free = NULL;
2971 BUG_ON(!PageLocked(page));
2972 if (PageWriteback(page))
2975 if (mapping == NULL) { /* can this still happen? */
2976 ret = drop_buffers(page, &buffers_to_free);
2980 spin_lock(&mapping->private_lock);
2981 ret = drop_buffers(page, &buffers_to_free);
2984 * If the filesystem writes its buffers by hand (eg ext3)
2985 * then we can have clean buffers against a dirty page. We
2986 * clean the page here; otherwise later reattachment of buffers
2987 * could encounter a non-uptodate page, which is unresolvable.
2988 * This only applies in the rare case where try_to_free_buffers
2989 * succeeds but the page is not freed.
2991 clear_page_dirty(page);
2993 spin_unlock(&mapping->private_lock);
2995 if (buffers_to_free) {
2996 struct buffer_head *bh = buffers_to_free;
2999 struct buffer_head *next = bh->b_this_page;
3000 free_buffer_head(bh);
3002 } while (bh != buffers_to_free);
3006 EXPORT_SYMBOL(try_to_free_buffers);
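/*
 * Hypothetical sketch of a ->releasepage() for a filesystem with no private
 * page state: it can simply defer to try_to_free_buffers().  Filesystems
 * with journalled buffers (ext3, reiserfs) need to do more work first.
 */
static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
{
        return try_to_free_buffers(page);
}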
3008 void block_sync_page(struct page *page)
3010 struct address_space *mapping;
3013 mapping = page_mapping(page);
3015 blk_run_backing_dev(mapping->backing_dev_info, page);
3019 * There are no bdflush tunables left. But distributions are
3020 * still running obsolete flush daemons, so we terminate them here.
3022 * Use of bdflush() is deprecated and will be removed in a future kernel.
3023 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3025 asmlinkage long sys_bdflush(int func, long data)
3027 static int msg_count;
3029 if (!capable(CAP_SYS_ADMIN))
3032 if (msg_count < 5) {
3035 "warning: process `%s' used the obsolete bdflush"
3036 " system call\n", current->comm);
3037 printk(KERN_INFO "Fix your initscripts?\n");
3046 * Buffer-head allocation
3048 static kmem_cache_t *bh_cachep;
3051 * Once the number of bh's in the machine exceeds this level, we start
3052 * stripping them in writeback.
3054 static int max_buffer_heads;
3056 int buffer_heads_over_limit;
3058 struct bh_accounting {
3059 int nr; /* Number of live bh's */
3060 int ratelimit; /* Limit cacheline bouncing */
3063 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3065 static void recalc_bh_state(void)
3070 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3072 __get_cpu_var(bh_accounting).ratelimit = 0;
3073 for_each_online_cpu(i)
3074 tot += per_cpu(bh_accounting, i).nr;
3075 buffer_heads_over_limit = (tot > max_buffer_heads);
3078 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3080 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3082 get_cpu_var(bh_accounting).nr++;
3084 put_cpu_var(bh_accounting);
3088 EXPORT_SYMBOL(alloc_buffer_head);
3090 void free_buffer_head(struct buffer_head *bh)
3092 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3093 kmem_cache_free(bh_cachep, bh);
3094 get_cpu_var(bh_accounting).nr--;
3096 put_cpu_var(bh_accounting);
3098 EXPORT_SYMBOL(free_buffer_head);
3101 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3103 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3104 SLAB_CTOR_CONSTRUCTOR) {
3105 struct buffer_head * bh = (struct buffer_head *)data;
3107 memset(bh, 0, sizeof(*bh));
3108 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3112 #ifdef CONFIG_HOTPLUG_CPU
3113 static void buffer_exit_cpu(int cpu)
3116 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3118 for (i = 0; i < BH_LRU_SIZE; i++) {
3122 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3123 per_cpu(bh_accounting, cpu).nr = 0;
3124 put_cpu_var(bh_accounting);
3127 static int buffer_cpu_notify(struct notifier_block *self,
3128 unsigned long action, void *hcpu)
3130 if (action == CPU_DEAD)
3131 buffer_exit_cpu((unsigned long)hcpu);
3134 #endif /* CONFIG_HOTPLUG_CPU */
3136 void __init buffer_init(void)
3140 bh_cachep = kmem_cache_create("buffer_head",
3141 sizeof(struct buffer_head), 0,
3142 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3148 * Limit the bh occupancy to 10% of ZONE_NORMAL
3150 nrpages = (nr_free_buffer_pages() * 10) / 100;
3151 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3152 hotcpu_notifier(buffer_cpu_notify, 0);
3155 EXPORT_SYMBOL(__bforget);
3156 EXPORT_SYMBOL(__brelse);
3157 EXPORT_SYMBOL(__wait_on_buffer);
3158 EXPORT_SYMBOL(block_commit_write);
3159 EXPORT_SYMBOL(block_prepare_write);
3160 EXPORT_SYMBOL(block_read_full_page);
3161 EXPORT_SYMBOL(block_sync_page);
3162 EXPORT_SYMBOL(block_truncate_page);
3163 EXPORT_SYMBOL(block_write_full_page);
3164 EXPORT_SYMBOL(cont_prepare_write);
3165 EXPORT_SYMBOL(end_buffer_async_write);
3166 EXPORT_SYMBOL(end_buffer_read_sync);
3167 EXPORT_SYMBOL(end_buffer_write_sync);
3168 EXPORT_SYMBOL(file_fsync);
3169 EXPORT_SYMBOL(fsync_bdev);
3170 EXPORT_SYMBOL(generic_block_bmap);
3171 EXPORT_SYMBOL(generic_commit_write);
3172 EXPORT_SYMBOL(generic_cont_expand);
3173 EXPORT_SYMBOL(generic_cont_expand_simple);
3174 EXPORT_SYMBOL(init_buffer);
3175 EXPORT_SYMBOL(invalidate_bdev);
3176 EXPORT_SYMBOL(ll_rw_block);
3177 EXPORT_SYMBOL(mark_buffer_dirty);
3178 EXPORT_SYMBOL(submit_bh);
3179 EXPORT_SYMBOL(sync_dirty_buffer);
3180 EXPORT_SYMBOL(unlock_buffer);