4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/capability.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 static void invalidate_bh_lrus(void);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void fastcall __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void fastcall unlock_buffer(struct buffer_head *bh)
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
90 void __wait_on_buffer(struct buffer_head * bh)
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
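/*
 * Illustrative sketch (not from the original file): the submit-and-wait
 * pattern that the locking primitives above support.  example_read_sync()
 * is a hypothetical name; it assumes @bh is already mapped to a disk block.
 */
static int example_read_sync(struct buffer_head *bh)
{
	lock_buffer(bh);			/* may sleep in __lock_buffer() */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);		/* someone else filled it meanwhile */
		return 0;
	}
	get_bh(bh);				/* end_buffer_read_sync() drops this ref */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks the buffer at I/O completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* __wait_on_buffer() under the hood */
	return buffer_uptodate(bh) ? 0 : -EIO;
}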
96 __clear_page_buffers(struct page *page)
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
103 static void buffer_io_error(struct buffer_head *bh)
105 char b[BDEVNAME_SIZE];
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
114 * unlock the buffer. This is what ll_rw_block uses too.
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
119 set_buffer_uptodate(bh);
121 /* This happens, due to failed READA attempts. */
122 clear_buffer_uptodate(bh);
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 char b[BDEVNAME_SIZE];
133 set_buffer_uptodate(bh);
135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 printk(KERN_WARNING "lost page write due to "
139 bdevname(bh->b_bdev, b));
141 set_buffer_write_io_error(bh);
142 clear_buffer_uptodate(bh);
149 * Write out and wait upon all the dirty data associated with a block
150 * device via its mapping. Does not take the superblock lock.
152 int sync_blockdev(struct block_device *bdev)
157 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
160 EXPORT_SYMBOL(sync_blockdev);
163 * Write out and wait upon all dirty data associated with this
164 * device. Filesystem data as well as the underlying block
165 * device. Takes the superblock lock.
167 int fsync_bdev(struct block_device *bdev)
169 struct super_block *sb = get_super(bdev);
171 int res = fsync_super(sb);
175 return sync_blockdev(bdev);
179 * freeze_bdev -- lock a filesystem and force it into a consistent state
180 * @bdev: blockdevice to lock
182 * This takes the block device bd_mount_mutex to make sure no new mounts
183 * happen on bdev until thaw_bdev() is called.
184 * If a superblock is found on this device, we take the s_umount semaphore
185 * on it to make sure nobody unmounts until the snapshot creation is done.
187 struct super_block *freeze_bdev(struct block_device *bdev)
189 struct super_block *sb;
191 mutex_lock(&bdev->bd_mount_mutex);
192 sb = get_super(bdev);
193 if (sb && !(sb->s_flags & MS_RDONLY)) {
194 sb->s_frozen = SB_FREEZE_WRITE;
199 sb->s_frozen = SB_FREEZE_TRANS;
202 sync_blockdev(sb->s_bdev);
204 if (sb->s_op->write_super_lockfs)
205 sb->s_op->write_super_lockfs(sb);
209 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
211 EXPORT_SYMBOL(freeze_bdev);
214 * thaw_bdev -- unlock filesystem
215 * @bdev: blockdevice to unlock
216 * @sb: associated superblock
218 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
220 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
223 BUG_ON(sb->s_bdev != bdev);
225 if (sb->s_op->unlockfs)
226 sb->s_op->unlockfs(sb);
227 sb->s_frozen = SB_UNFROZEN;
229 wake_up(&sb->s_wait_unfrozen);
233 mutex_unlock(&bdev->bd_mount_mutex);
235 EXPORT_SYMBOL(thaw_bdev);
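/*
 * Illustrative sketch (not from the original file): how a snapshot or backup
 * path would typically bracket its work with freeze_bdev()/thaw_bdev().
 * example_snapshot() is a hypothetical name.
 */
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* new writes blocked, fs synced to disk */
	/* ... copy the now-consistent device contents here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
	return 0;
}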
238 * Various filesystems appear to want __find_get_block to be non-blocking.
239 * But it's the page lock which protects the buffers. To get around this,
240 * we get exclusion from try_to_free_buffers with the blockdev mapping's
243 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
244 * may be quite high. This code could TryLock the page, and if that
245 * succeeds, there is no need to take private_lock. (But if
246 * private_lock is contended then so is mapping->tree_lock).
248 static struct buffer_head *
249 __find_get_block_slow(struct block_device *bdev, sector_t block)
251 struct inode *bd_inode = bdev->bd_inode;
252 struct address_space *bd_mapping = bd_inode->i_mapping;
253 struct buffer_head *ret = NULL;
255 struct buffer_head *bh;
256 struct buffer_head *head;
260 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
261 page = find_get_page(bd_mapping, index);
265 spin_lock(&bd_mapping->private_lock);
266 if (!page_has_buffers(page))
268 head = page_buffers(page);
271 if (bh->b_blocknr == block) {
276 if (!buffer_mapped(bh))
278 bh = bh->b_this_page;
279 } while (bh != head);
281 /* we might be here because some of the buffers on this page are
282 * not mapped. This is due to various races between
283 * file io on the block device and getblk. It gets dealt with
284 * elsewhere, don't buffer_error if we had some unmapped buffers
287 printk("__find_get_block_slow() failed. "
288 "block=%llu, b_blocknr=%llu\n",
289 (unsigned long long)block,
290 (unsigned long long)bh->b_blocknr);
291 printk("b_state=0x%08lx, b_size=%zu\n",
292 bh->b_state, bh->b_size);
293 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
296 spin_unlock(&bd_mapping->private_lock);
297 page_cache_release(page);
302 /* If invalidate_buffers() will trash dirty buffers, it means some kind
303 of fs corruption is going on. Trashing dirty data always implies losing
304 information that was supposed to be just stored on the physical layer
307 Thus invalidate_buffers in general usage is not allowed to trash
308 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
309 be preserved. These buffers are simply skipped.
311 We also skip buffers which are still in use. For example this can
312 happen if a userspace program is reading the block device.
314 NOTE: if the user removes a removable-media disk while
315 there is still dirty data not synced to disk (due to a bug in the device driver
316 or to a user error), then by not destroying the dirty buffers we could
317 generate corruption also on the next medium inserted; thus a parameter is
318 necessary to handle this case in the safest way possible (trying
319 not to corrupt the newly inserted disk with data belonging to
320 the old, now corrupted, disk). Also, for a ramdisk the natural way
321 to release the ramdisk memory is to destroy its dirty buffers.
323 These are two special cases. Normal usage implies that the device driver
324 issues a sync on the device (without waiting for I/O completion) and
325 then an invalidate_buffers call that doesn't trash dirty buffers.
327 For handling cache coherency with the blkdev pagecache the 'update' case
328 has been introduced. It is needed to re-read from disk any pinned
329 buffer. NOTE: re-reading from disk is destructive so we can do it only
330 when we assume nobody is changing the buffercache under our I/O and when
331 we think the disk contains more recent information than the buffercache.
332 The update == 1 pass marks the buffers we need to update, the update == 2
333 pass does the actual I/O. */
334 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
336 struct address_space *mapping = bdev->bd_inode->i_mapping;
338 if (mapping->nrpages == 0)
341 invalidate_bh_lrus();
343 * FIXME: what about destroy_dirty_buffers?
344 * We really want to use invalidate_inode_pages2() for
345 * that, but not until that's cleaned up.
347 invalidate_inode_pages(mapping);
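/*
 * Illustrative sketch (not from the original file): the "normal usage"
 * described in the comment above - sync first, then invalidate without
 * trashing dirty buffers.  example_media_revalidate() is a hypothetical name.
 */
static void example_media_revalidate(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* start and wait on writeback of dirty data */
	invalidate_bdev(bdev, 0);	/* drop clean buffers/pages, keep dirty ones */
}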
351 * Kick pdflush then try to free up some ZONE_NORMAL memory.
353 static void free_more_memory(void)
358 wakeup_pdflush(1024);
361 for_each_online_pgdat(pgdat) {
362 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
364 try_to_free_pages(zones, GFP_NOFS);
369 * I/O completion handler for block_read_full_page() - pages
370 * which come unlocked at the end of I/O.
372 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
375 struct buffer_head *first;
376 struct buffer_head *tmp;
378 int page_uptodate = 1;
380 BUG_ON(!buffer_async_read(bh));
384 set_buffer_uptodate(bh);
386 clear_buffer_uptodate(bh);
387 if (printk_ratelimit())
393 * Be _very_ careful from here on. Bad things can happen if
394 * two buffer heads end IO at almost the same time and both
395 * decide that the page is now completely done.
397 first = page_buffers(page);
398 local_irq_save(flags);
399 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_read(bh);
404 if (!buffer_uptodate(tmp))
406 if (buffer_async_read(tmp)) {
407 BUG_ON(!buffer_locked(tmp));
410 tmp = tmp->b_this_page;
412 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
413 local_irq_restore(flags);
416 * If none of the buffers had errors and they are all
417 * uptodate then we can set the page uptodate.
419 if (page_uptodate && !PageError(page))
420 SetPageUptodate(page);
425 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
426 local_irq_restore(flags);
431 * Completion handler for block_write_full_page() - pages which are unlocked
432 * during I/O, and which have PageWriteback cleared upon I/O completion.
434 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
436 char b[BDEVNAME_SIZE];
438 struct buffer_head *first;
439 struct buffer_head *tmp;
442 BUG_ON(!buffer_async_write(bh));
446 set_buffer_uptodate(bh);
448 if (printk_ratelimit()) {
450 printk(KERN_WARNING "lost page write due to "
452 bdevname(bh->b_bdev, b));
454 set_bit(AS_EIO, &page->mapping->flags);
455 set_buffer_write_io_error(bh);
456 clear_buffer_uptodate(bh);
460 first = page_buffers(page);
461 local_irq_save(flags);
462 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
464 clear_buffer_async_write(bh);
466 tmp = bh->b_this_page;
468 if (buffer_async_write(tmp)) {
469 BUG_ON(!buffer_locked(tmp));
472 tmp = tmp->b_this_page;
474 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
475 local_irq_restore(flags);
476 end_page_writeback(page);
480 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
481 local_irq_restore(flags);
486 * If a page's buffers are under async read-in (end_buffer_async_read
487 * completion) then there is a possibility that another thread of
488 * control could lock one of the buffers after it has completed
489 * but while some of the other buffers have not completed. This
490 * locked buffer would confuse end_buffer_async_read() into not unlocking
491 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
492 * that this buffer is not under async I/O.
494 * The page comes unlocked when it has no locked buffer_async buffers
497 * PageLocked prevents anyone starting new async I/O reads any of
500 * PageWriteback is used to prevent simultaneous writeout of the same
503 * PageLocked prevents anyone from starting writeback of a page which is
504 * under read I/O (PageWriteback is only ever set against a locked page).
506 static void mark_buffer_async_read(struct buffer_head *bh)
508 bh->b_end_io = end_buffer_async_read;
509 set_buffer_async_read(bh);
512 void mark_buffer_async_write(struct buffer_head *bh)
514 bh->b_end_io = end_buffer_async_write;
515 set_buffer_async_write(bh);
517 EXPORT_SYMBOL(mark_buffer_async_write);
521 * fs/buffer.c contains helper functions for buffer-backed address space's
522 * fsync functions. A common requirement for buffer-based filesystems is
523 * that certain data from the backing blockdev needs to be written out for
524 * a successful fsync(). For example, ext2 indirect blocks need to be
525 * written back and waited upon before fsync() returns.
527 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
528 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
529 * management of a list of dependent buffers at ->i_mapping->private_list.
531 * Locking is a little subtle: try_to_free_buffers() will remove buffers
532 * from their controlling inode's queue when they are being freed. But
533 * try_to_free_buffers() will be operating against the *blockdev* mapping
534 * at the time, not against the S_ISREG file which depends on those buffers.
535 * So the locking for private_list is via the private_lock in the address_space
536 * which backs the buffers. Which is different from the address_space
537 * against which the buffers are listed. So for a particular address_space,
538 * mapping->private_lock does *not* protect mapping->private_list! In fact,
539 * mapping->private_list will always be protected by the backing blockdev's
542 * Which introduces a requirement: all buffers on an address_space's
543 * ->private_list must be from the same address_space: the blockdev's.
545 * address_spaces which do not place buffers at ->private_list via these
546 * utility functions are free to use private_lock and private_list for
547 * whatever they want. The only requirement is that list_empty(private_list)
548 * be true at clear_inode() time.
550 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
551 * filesystems should do that. invalidate_inode_buffers() should just go
552 * BUG_ON(!list_empty).
554 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
555 * take an address_space, not an inode. And it should be called
556 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
559 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
560 * list if it is already on a list. Because if the buffer is on a list,
561 * it *must* already be on the right one. If not, the filesystem is being
562 * silly. This will save a ton of locking. But first we have to ensure
563 * that buffers are taken *off* the old inode's list when they are freed
564 * (presumably in truncate). That requires careful auditing of all
565 * filesystems (do it inside bforget()). It could also be done by bringing
570 * The buffer's backing address_space's private_lock must be held
572 static inline void __remove_assoc_queue(struct buffer_head *bh)
574 list_del_init(&bh->b_assoc_buffers);
575 WARN_ON(!bh->b_assoc_map);
576 if (buffer_write_io_error(bh))
577 set_bit(AS_EIO, &bh->b_assoc_map->flags);
578 bh->b_assoc_map = NULL;
581 int inode_has_buffers(struct inode *inode)
583 return !list_empty(&inode->i_data.private_list);
587 * osync is designed to support O_SYNC io. It waits synchronously for
588 * all already-submitted IO to complete, but does not queue any new
589 * writes to the disk.
591 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
592 * you dirty the buffers, and then use osync_inode_buffers to wait for
593 * completion. Any other dirty buffers which are not yet queued for
594 * write will not be flushed to disk by the osync.
596 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
598 struct buffer_head *bh;
604 list_for_each_prev(p, list) {
606 if (buffer_locked(bh)) {
610 if (!buffer_uptodate(bh))
622 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
624 * @mapping: the mapping which wants those buffers written
626 * Starts I/O against the buffers at mapping->private_list, and waits upon
629 * Basically, this is a convenience function for fsync().
630 * @mapping is a file or directory which needs those buffers to be written for
631 * a successful fsync().
633 int sync_mapping_buffers(struct address_space *mapping)
635 struct address_space *buffer_mapping = mapping->assoc_mapping;
637 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
640 return fsync_buffers_list(&buffer_mapping->private_lock,
641 &mapping->private_list);
643 EXPORT_SYMBOL(sync_mapping_buffers);
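/*
 * Illustrative sketch (not from the original file): the shape of a simple
 * buffer-backed filesystem's fsync, built on sync_mapping_buffers() as
 * described above (ext2 does essentially this).  example_fsync() is a
 * hypothetical name.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int ret;

	/* write out and wait on the dependent buffers on ->private_list */
	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;
	/* a real filesystem would also write the inode itself here */
	return ret;
}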
646 * Called when we've recently written block `bblock', and it is known that
647 * `bblock' was for a buffer_boundary() buffer. This means that the block at
648 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
649 * dirty, schedule it for IO. So that indirects merge nicely with their data.
651 void write_boundary_block(struct block_device *bdev,
652 sector_t bblock, unsigned blocksize)
654 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
656 if (buffer_dirty(bh))
657 ll_rw_block(WRITE, 1, &bh);
662 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
664 struct address_space *mapping = inode->i_mapping;
665 struct address_space *buffer_mapping = bh->b_page->mapping;
667 mark_buffer_dirty(bh);
668 if (!mapping->assoc_mapping) {
669 mapping->assoc_mapping = buffer_mapping;
671 BUG_ON(mapping->assoc_mapping != buffer_mapping);
673 if (list_empty(&bh->b_assoc_buffers)) {
674 spin_lock(&buffer_mapping->private_lock);
675 list_move_tail(&bh->b_assoc_buffers,
676 &mapping->private_list);
677 bh->b_assoc_map = mapping;
678 spin_unlock(&buffer_mapping->private_lock);
681 EXPORT_SYMBOL(mark_buffer_dirty_inode);
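/*
 * Illustrative sketch (not from the original file): after modifying an
 * indirect/metadata block on behalf of @inode, a filesystem hangs the buffer
 * on the inode's ->private_list so that a later fsync() finds it.
 * example_dirty_metadata() is a hypothetical name.
 */
static void example_dirty_metadata(struct inode *inode, struct buffer_head *bh)
{
	/* ... modify the block through bh->b_data ... */
	mark_buffer_dirty_inode(bh, inode);	/* queued on inode->i_mapping->private_list */
}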
684 * Add a page to the dirty page list.
686 * It is a sad fact of life that this function is called from several places
687 * deeply under spinlocking. It may not sleep.
689 * If the page has buffers, the uptodate buffers are set dirty, to preserve
690 * dirty-state coherency between the page and the buffers. If the page does
691 * not have buffers then when they are later attached they will all be set
694 * The buffers are dirtied before the page is dirtied. There's a small race
695 * window in which a writepage caller may see the page cleanness but not the
696 * buffer dirtiness. That's fine. If this code were to set the page dirty
697 * before the buffers, a concurrent writepage caller could clear the page dirty
698 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
699 * page on the dirty page list.
701 * We use private_lock to lock against try_to_free_buffers while using the
702 * page's buffer list. Also use this to protect against clean buffers being
703 * added to the page after it was set dirty.
705 * FIXME: may need to call ->reservepage here as well. That's rather up to the
706 * address_space though.
708 int __set_page_dirty_buffers(struct page *page)
710 struct address_space * const mapping = page_mapping(page);
712 if (unlikely(!mapping))
713 return !TestSetPageDirty(page);
715 spin_lock(&mapping->private_lock);
716 if (page_has_buffers(page)) {
717 struct buffer_head *head = page_buffers(page);
718 struct buffer_head *bh = head;
721 set_buffer_dirty(bh);
722 bh = bh->b_this_page;
723 } while (bh != head);
725 spin_unlock(&mapping->private_lock);
727 if (TestSetPageDirty(page))
730 write_lock_irq(&mapping->tree_lock);
731 if (page->mapping) { /* Race with truncate? */
732 if (mapping_cap_account_dirty(mapping))
733 __inc_zone_page_state(page, NR_FILE_DIRTY);
734 radix_tree_tag_set(&mapping->page_tree,
735 page_index(page), PAGECACHE_TAG_DIRTY);
737 write_unlock_irq(&mapping->tree_lock);
738 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
741 EXPORT_SYMBOL(__set_page_dirty_buffers);
744 * Write out and wait upon a list of buffers.
746 * We have conflicting pressures: we want to make sure that all
747 * initially dirty buffers get waited on, but that any subsequently
748 * dirtied buffers don't. After all, we don't want fsync to last
749 * forever if somebody is actively writing to the file.
751 * Do this in two main stages: first we copy dirty buffers to a
752 * temporary inode list, queueing the writes as we go. Then we clean
753 * up, waiting for those writes to complete.
755 * During this second stage, any subsequent updates to the file may end
756 * up refiling the buffer on the original inode's dirty list again, so
757 * there is a chance we will end up with a buffer queued for write but
758 * not yet completed on that list. So, as a final cleanup we go through
759 * the osync code to catch these locked, dirty buffers without requeuing
760 * any newly dirty buffers for write.
762 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
764 struct buffer_head *bh;
765 struct list_head tmp;
768 INIT_LIST_HEAD(&tmp);
771 while (!list_empty(list)) {
772 bh = BH_ENTRY(list->next);
773 __remove_assoc_queue(bh);
774 if (buffer_dirty(bh) || buffer_locked(bh)) {
775 list_add(&bh->b_assoc_buffers, &tmp);
776 if (buffer_dirty(bh)) {
780 * Ensure any pending I/O completes so that
781 * ll_rw_block() actually writes the current
782 * contents - it is a noop if I/O is still in
783 * flight on potentially older contents.
785 ll_rw_block(SWRITE, 1, &bh);
792 while (!list_empty(&tmp)) {
793 bh = BH_ENTRY(tmp.prev);
794 list_del_init(&bh->b_assoc_buffers);
798 if (!buffer_uptodate(bh))
805 err2 = osync_buffers_list(lock, list);
813 * Invalidate any and all dirty buffers on a given inode. We are
814 * probably unmounting the fs, but that doesn't mean we have already
815 * done a sync(). Just drop the buffers from the inode list.
817 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
818 * assumes that all the buffers are against the blockdev. Not true
821 void invalidate_inode_buffers(struct inode *inode)
823 if (inode_has_buffers(inode)) {
824 struct address_space *mapping = &inode->i_data;
825 struct list_head *list = &mapping->private_list;
826 struct address_space *buffer_mapping = mapping->assoc_mapping;
828 spin_lock(&buffer_mapping->private_lock);
829 while (!list_empty(list))
830 __remove_assoc_queue(BH_ENTRY(list->next));
831 spin_unlock(&buffer_mapping->private_lock);
836 * Remove any clean buffers from the inode's buffer list. This is called
837 * when we're trying to free the inode itself. Those buffers can pin it.
839 * Returns true if all buffers were removed.
841 int remove_inode_buffers(struct inode *inode)
845 if (inode_has_buffers(inode)) {
846 struct address_space *mapping = &inode->i_data;
847 struct list_head *list = &mapping->private_list;
848 struct address_space *buffer_mapping = mapping->assoc_mapping;
850 spin_lock(&buffer_mapping->private_lock);
851 while (!list_empty(list)) {
852 struct buffer_head *bh = BH_ENTRY(list->next);
853 if (buffer_dirty(bh)) {
857 __remove_assoc_queue(bh);
859 spin_unlock(&buffer_mapping->private_lock);
865 * Create the appropriate buffers when given a page for the data area and
866 * the size of each buffer.. Use the bh->b_this_page linked list to
867 * follow the buffers created. Return NULL if unable to create more
870 * The retry flag is used to differentiate async IO (paging, swapping)
871 * which may not fail from ordinary buffer allocations.
873 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
876 struct buffer_head *bh, *head;
882 while ((offset -= size) >= 0) {
883 bh = alloc_buffer_head(GFP_NOFS);
888 bh->b_this_page = head;
893 atomic_set(&bh->b_count, 0);
894 bh->b_private = NULL;
897 /* Link the buffer to its page */
898 set_bh_page(bh, page, offset);
900 init_buffer(bh, NULL, NULL);
904 * In case anything failed, we just free everything we got.
910 head = head->b_this_page;
911 free_buffer_head(bh);
916 * Return failure for non-async IO requests. Async IO requests
917 * are not allowed to fail, so we have to wait until buffer heads
918 * become available. But we don't want tasks sleeping with
919 * partially complete buffers, so all were released above.
924 /* We're _really_ low on memory. Now we just
925 * wait for old buffer heads to become free due to
926 * finishing IO. Since this is an async request and
927 * the reserve list is empty, we're sure there are
928 * async buffer heads in use.
933 EXPORT_SYMBOL_GPL(alloc_page_buffers);
936 link_dev_buffers(struct page *page, struct buffer_head *head)
938 struct buffer_head *bh, *tail;
943 bh = bh->b_this_page;
945 tail->b_this_page = head;
946 attach_page_buffers(page, head);
950 * Initialise the state of a blockdev page's buffers.
953 init_page_buffers(struct page *page, struct block_device *bdev,
954 sector_t block, int size)
956 struct buffer_head *head = page_buffers(page);
957 struct buffer_head *bh = head;
958 int uptodate = PageUptodate(page);
961 if (!buffer_mapped(bh)) {
962 init_buffer(bh, NULL, NULL);
964 bh->b_blocknr = block;
966 set_buffer_uptodate(bh);
967 set_buffer_mapped(bh);
970 bh = bh->b_this_page;
971 } while (bh != head);
975 * Create the page-cache page that contains the requested block.
977 * This is used purely for blockdev mappings.
980 grow_dev_page(struct block_device *bdev, sector_t block,
981 pgoff_t index, int size)
983 struct inode *inode = bdev->bd_inode;
985 struct buffer_head *bh;
987 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
991 BUG_ON(!PageLocked(page));
993 if (page_has_buffers(page)) {
994 bh = page_buffers(page);
995 if (bh->b_size == size) {
996 init_page_buffers(page, bdev, block, size);
999 if (!try_to_free_buffers(page))
1004 * Allocate some buffers for this page
1006 bh = alloc_page_buffers(page, size, 0);
1011 * Link the page to the buffers and initialise them. Take the
1012 * lock to be atomic wrt __find_get_block(), which does not
1013 * run under the page lock.
1015 spin_lock(&inode->i_mapping->private_lock);
1016 link_dev_buffers(page, bh);
1017 init_page_buffers(page, bdev, block, size);
1018 spin_unlock(&inode->i_mapping->private_lock);
1024 page_cache_release(page);
1029 * Create buffers for the specified block device block's page. If
1030 * that page was dirty, the buffers are set dirty also.
1032 * Except that's a bug. Attaching dirty buffers to a dirty
1033 * blockdev's page can result in filesystem corruption, because
1034 * some of those buffers may be aliases of filesystem data.
1035 * grow_dev_page() will go BUG() if this happens.
1038 grow_buffers(struct block_device *bdev, sector_t block, int size)
1047 } while ((size << sizebits) < PAGE_SIZE);
1049 index = block >> sizebits;
1052 * Check for a block which wants to lie outside our maximum possible
1053 * pagecache index. (this comparison is done using sector_t types).
1055 if (unlikely(index != block >> sizebits)) {
1056 char b[BDEVNAME_SIZE];
1058 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1060 __FUNCTION__, (unsigned long long)block,
1064 block = index << sizebits;
1065 /* Create a page with the proper size buffers.. */
1066 page = grow_dev_page(bdev, block, index, size);
1070 page_cache_release(page);
1074 static struct buffer_head *
1075 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1077 /* Size must be a multiple of the hard sector size */
1078 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1079 (size < 512 || size > PAGE_SIZE))) {
1080 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1082 printk(KERN_ERR "hardsect size: %d\n",
1083 bdev_hardsect_size(bdev));
1090 struct buffer_head * bh;
1093 bh = __find_get_block(bdev, block, size);
1097 ret = grow_buffers(bdev, block, size);
1106 * The relationship between dirty buffers and dirty pages:
1108 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1109 * the page is tagged dirty in its radix tree.
1111 * At all times, the dirtiness of the buffers represents the dirtiness of
1112 * subsections of the page. If the page has buffers, the page dirty bit is
1113 * merely a hint about the true dirty state.
1115 * When a page is set dirty in its entirety, all its buffers are marked dirty
1116 * (if the page has buffers).
1118 * When a buffer is marked dirty, its page is dirtied, but the page's other
1121 * Also. When blockdev buffers are explicitly read with bread(), they
1122 * individually become uptodate. But their backing page remains not
1123 * uptodate - even if all of its buffers are uptodate. A subsequent
1124 * block_read_full_page() against that page will discover all the uptodate
1125 * buffers, will set the page uptodate and will perform no I/O.
1129 * mark_buffer_dirty - mark a buffer_head as needing writeout
1130 * @bh: the buffer_head to mark dirty
1132 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1133 * backing page dirty, then tag the page as dirty in its address_space's radix
1134 * tree and then attach the address_space's inode to its superblock's dirty
1137 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1138 * mapping->tree_lock and the global inode_lock.
1140 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1142 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1143 __set_page_dirty_nobuffers(bh->b_page);
1147 * Decrement a buffer_head's reference count. If all buffers against a page
1148 * have zero reference count, are clean and unlocked, and if the page is clean
1149 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1150 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1151 * a page but it ends up not being freed, and buffers may later be reattached).
1153 void __brelse(struct buffer_head * buf)
1155 if (atomic_read(&buf->b_count)) {
1159 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1164 * bforget() is like brelse(), except it discards any
1165 * potentially dirty data.
1167 void __bforget(struct buffer_head *bh)
1169 clear_buffer_dirty(bh);
1170 if (!list_empty(&bh->b_assoc_buffers)) {
1171 struct address_space *buffer_mapping = bh->b_page->mapping;
1173 spin_lock(&buffer_mapping->private_lock);
1174 list_del_init(&bh->b_assoc_buffers);
1175 bh->b_assoc_map = NULL;
1176 spin_unlock(&buffer_mapping->private_lock);
1181 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1184 if (buffer_uptodate(bh)) {
1189 bh->b_end_io = end_buffer_read_sync;
1190 submit_bh(READ, bh);
1192 if (buffer_uptodate(bh))
1200 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1201 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1202 * refcount elevated by one when they're in an LRU. A buffer can only appear
1203 * once in a particular CPU's LRU. A single buffer can be present in multiple
1204 * CPU's LRUs at the same time.
1206 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1207 * sb_find_get_block().
1209 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1210 * a local interrupt disable for that.
1213 #define BH_LRU_SIZE 8
1216 struct buffer_head *bhs[BH_LRU_SIZE];
1219 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1222 #define bh_lru_lock() local_irq_disable()
1223 #define bh_lru_unlock() local_irq_enable()
1225 #define bh_lru_lock() preempt_disable()
1226 #define bh_lru_unlock() preempt_enable()
1229 static inline void check_irqs_on(void)
1231 #ifdef irqs_disabled
1232 BUG_ON(irqs_disabled());
1237 * The LRU management algorithm is dopey-but-simple. Sorry.
1239 static void bh_lru_install(struct buffer_head *bh)
1241 struct buffer_head *evictee = NULL;
1246 lru = &__get_cpu_var(bh_lrus);
1247 if (lru->bhs[0] != bh) {
1248 struct buffer_head *bhs[BH_LRU_SIZE];
1254 for (in = 0; in < BH_LRU_SIZE; in++) {
1255 struct buffer_head *bh2 = lru->bhs[in];
1260 if (out >= BH_LRU_SIZE) {
1261 BUG_ON(evictee != NULL);
1268 while (out < BH_LRU_SIZE)
1270 memcpy(lru->bhs, bhs, sizeof(bhs));
1279 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1281 static struct buffer_head *
1282 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1284 struct buffer_head *ret = NULL;
1290 lru = &__get_cpu_var(bh_lrus);
1291 for (i = 0; i < BH_LRU_SIZE; i++) {
1292 struct buffer_head *bh = lru->bhs[i];
1294 if (bh && bh->b_bdev == bdev &&
1295 bh->b_blocknr == block && bh->b_size == size) {
1298 lru->bhs[i] = lru->bhs[i - 1];
1313 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1314 * it in the LRU and mark it as accessed. If it is not present then return
1317 struct buffer_head *
1318 __find_get_block(struct block_device *bdev, sector_t block, int size)
1320 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1323 bh = __find_get_block_slow(bdev, block);
1331 EXPORT_SYMBOL(__find_get_block);
1334 * __getblk will locate (and, if necessary, create) the buffer_head
1335 * which corresponds to the passed block_device, block and size. The
1336 * returned buffer has its reference count incremented.
1338 * __getblk() cannot fail - it just keeps trying. If you pass it an
1339 * illegal block number, __getblk() will happily return a buffer_head
1340 * which represents the non-existent block. Very weird.
1342 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1343 * attempt is failing. FIXME, perhaps?
1345 struct buffer_head *
1346 __getblk(struct block_device *bdev, sector_t block, int size)
1348 struct buffer_head *bh = __find_get_block(bdev, block, size);
1352 bh = __getblk_slow(bdev, block, size);
1355 EXPORT_SYMBOL(__getblk);
1358 * Do async read-ahead on a buffer..
1360 void __breadahead(struct block_device *bdev, sector_t block, int size)
1362 struct buffer_head *bh = __getblk(bdev, block, size);
1364 ll_rw_block(READA, 1, &bh);
1368 EXPORT_SYMBOL(__breadahead);
1371 * __bread() - reads a specified block and returns the bh
1372 * @bdev: the block_device to read from
1373 * @block: number of block
1374 * @size: size (in bytes) to read
1376 * Reads a specified block, and returns buffer head that contains it.
1377 * It returns NULL if the block was unreadable.
1379 struct buffer_head *
1380 __bread(struct block_device *bdev, sector_t block, int size)
1382 struct buffer_head *bh = __getblk(bdev, block, size);
1384 if (likely(bh) && !buffer_uptodate(bh))
1385 bh = __bread_slow(bh);
1388 EXPORT_SYMBOL(__bread);
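/*
 * Illustrative sketch (not from the original file): a read-modify-write of one
 * block of device metadata using __bread()/__breadahead().
 * example_update_block() is a hypothetical name.
 */
static int example_update_block(struct block_device *bdev, sector_t block,
				const void *data, int size)
{
	struct buffer_head *bh;

	__breadahead(bdev, block + 1, size);	/* async hint; result is discarded */
	bh = __bread(bdev, block, size);	/* returns NULL if unreadable */
	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, size);
	mark_buffer_dirty(bh);			/* written back later by writeback */
	brelse(bh);
	return 0;
}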
1391 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1392 * This doesn't race because it runs in each cpu either in irq
1393 * or with preempt disabled.
1395 static void invalidate_bh_lru(void *arg)
1397 struct bh_lru *b = &get_cpu_var(bh_lrus);
1400 for (i = 0; i < BH_LRU_SIZE; i++) {
1404 put_cpu_var(bh_lrus);
1407 static void invalidate_bh_lrus(void)
1409 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1412 void set_bh_page(struct buffer_head *bh,
1413 struct page *page, unsigned long offset)
1416 BUG_ON(offset >= PAGE_SIZE);
1417 if (PageHighMem(page))
1419 * This catches illegal uses and preserves the offset:
1421 bh->b_data = (char *)(0 + offset);
1423 bh->b_data = page_address(page) + offset;
1425 EXPORT_SYMBOL(set_bh_page);
1428 * Called when truncating a buffer on a page completely.
1430 static void discard_buffer(struct buffer_head * bh)
1433 clear_buffer_dirty(bh);
1435 clear_buffer_mapped(bh);
1436 clear_buffer_req(bh);
1437 clear_buffer_new(bh);
1438 clear_buffer_delay(bh);
1443 * block_invalidatepage - invalidate part or all of a buffer-backed page
1445 * @page: the page which is affected
1446 * @offset: the index of the truncation point
1448 * block_invalidatepage() is called when all or part of the page has become
1449 * invalidated by a truncate operation.
1451 * block_invalidatepage() does not have to release all buffers, but it must
1452 * ensure that no dirty buffer is left outside @offset and that no I/O
1453 * is underway against any of the blocks which are outside the truncation
1454 * point. Because the caller is about to free (and possibly reuse) those
1457 void block_invalidatepage(struct page *page, unsigned long offset)
1459 struct buffer_head *head, *bh, *next;
1460 unsigned int curr_off = 0;
1462 BUG_ON(!PageLocked(page));
1463 if (!page_has_buffers(page))
1466 head = page_buffers(page);
1469 unsigned int next_off = curr_off + bh->b_size;
1470 next = bh->b_this_page;
1473 * is this block fully invalidated?
1475 if (offset <= curr_off)
1477 curr_off = next_off;
1479 } while (bh != head);
1482 * We release buffers only if the entire page is being invalidated.
1483 * The get_block cached value has been unconditionally invalidated,
1484 * so real IO is not possible anymore.
1487 try_to_release_page(page, 0);
1491 EXPORT_SYMBOL(block_invalidatepage);
1494 * We attach and possibly dirty the buffers atomically wrt
1495 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1496 * is already excluded via the page lock.
1498 void create_empty_buffers(struct page *page,
1499 unsigned long blocksize, unsigned long b_state)
1501 struct buffer_head *bh, *head, *tail;
1503 head = alloc_page_buffers(page, blocksize, 1);
1506 bh->b_state |= b_state;
1508 bh = bh->b_this_page;
1510 tail->b_this_page = head;
1512 spin_lock(&page->mapping->private_lock);
1513 if (PageUptodate(page) || PageDirty(page)) {
1516 if (PageDirty(page))
1517 set_buffer_dirty(bh);
1518 if (PageUptodate(page))
1519 set_buffer_uptodate(bh);
1520 bh = bh->b_this_page;
1521 } while (bh != head);
1523 attach_page_buffers(page, head);
1524 spin_unlock(&page->mapping->private_lock);
1526 EXPORT_SYMBOL(create_empty_buffers);
1529 * We are taking a block for data and we don't want any output from any
1530 * buffer-cache aliases from the moment this function returns
1531 * until something explicitly marks the buffer
1532 * dirty (hopefully that will not happen until we free that block ;-)
1533 * We don't even need to mark it not-uptodate - nobody can expect
1534 * anything from a newly allocated buffer anyway. We used to use
1535 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1536 * don't want to mark the alias unmapped, for example - it would confuse
1537 * anyone who might pick it with bread() afterwards...
1539 * Also.. Note that bforget() doesn't lock the buffer. So there can
1540 * be writeout I/O going on against recently-freed buffers. We don't
1541 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1542 * only if we really need to. That happens here.
1544 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1546 struct buffer_head *old_bh;
1550 old_bh = __find_get_block_slow(bdev, block);
1552 clear_buffer_dirty(old_bh);
1553 wait_on_buffer(old_bh);
1554 clear_buffer_req(old_bh);
1558 EXPORT_SYMBOL(unmap_underlying_metadata);
1561 * NOTE! All mapped/uptodate combinations are valid:
1563 * Mapped Uptodate Meaning
1565 * No No "unknown" - must do get_block()
1566 * No Yes "hole" - zero-filled
1567 * Yes No "allocated" - allocated on disk, not read in
1568 * Yes Yes "valid" - allocated and up-to-date in memory.
1570 * "Dirty" is valid only with the last case (mapped+uptodate).
1574 * While block_write_full_page is writing back the dirty buffers under
1575 * the page lock, whoever dirtied the buffers may decide to clean them
1576 * again at any time. We handle that by only looking at the buffer
1577 * state inside lock_buffer().
1579 * If block_write_full_page() is called for regular writeback
1580 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1581 * locked buffer. This only can happen if someone has written the buffer
1582 * directly, with submit_bh(). At the address_space level PageWriteback
1583 * prevents this contention from occurring.
1585 static int __block_write_full_page(struct inode *inode, struct page *page,
1586 get_block_t *get_block, struct writeback_control *wbc)
1590 sector_t last_block;
1591 struct buffer_head *bh, *head;
1592 const unsigned blocksize = 1 << inode->i_blkbits;
1593 int nr_underway = 0;
1595 BUG_ON(!PageLocked(page));
1597 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1599 if (!page_has_buffers(page)) {
1600 create_empty_buffers(page, blocksize,
1601 (1 << BH_Dirty)|(1 << BH_Uptodate));
1605 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1606 * here, and the (potentially unmapped) buffers may become dirty at
1607 * any time. If a buffer becomes dirty here after we've inspected it
1608 * then we just miss that fact, and the page stays dirty.
1610 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1611 * handle that here by just cleaning them.
1614 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1615 head = page_buffers(page);
1619 * Get all the dirty buffers mapped to disk addresses and
1620 * handle any aliases from the underlying blockdev's mapping.
1623 if (block > last_block) {
1625 * mapped buffers outside i_size will occur, because
1626 * this page can be outside i_size when there is a
1627 * truncate in progress.
1630 * The buffer was zeroed by block_write_full_page()
1632 clear_buffer_dirty(bh);
1633 set_buffer_uptodate(bh);
1634 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1635 WARN_ON(bh->b_size != blocksize);
1636 err = get_block(inode, block, bh, 1);
1639 if (buffer_new(bh)) {
1640 /* blockdev mappings never come here */
1641 clear_buffer_new(bh);
1642 unmap_underlying_metadata(bh->b_bdev,
1646 bh = bh->b_this_page;
1648 } while (bh != head);
1651 if (!buffer_mapped(bh))
1654 * If it's a fully non-blocking write attempt and we cannot
1655 * lock the buffer then redirty the page. Note that this can
1656 * potentially cause a busy-wait loop from pdflush and kswapd
1657 * activity, but those code paths have their own higher-level
1660 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1662 } else if (test_set_buffer_locked(bh)) {
1663 redirty_page_for_writepage(wbc, page);
1666 if (test_clear_buffer_dirty(bh)) {
1667 mark_buffer_async_write(bh);
1671 } while ((bh = bh->b_this_page) != head);
1674 * The page and its buffers are protected by PageWriteback(), so we can
1675 * drop the bh refcounts early.
1677 BUG_ON(PageWriteback(page));
1678 set_page_writeback(page);
1681 struct buffer_head *next = bh->b_this_page;
1682 if (buffer_async_write(bh)) {
1683 submit_bh(WRITE, bh);
1687 } while (bh != head);
1692 if (nr_underway == 0) {
1694 * The page was marked dirty, but the buffers were
1695 * clean. Someone wrote them back by hand with
1696 * ll_rw_block/submit_bh. A rare case.
1700 if (!buffer_uptodate(bh)) {
1704 bh = bh->b_this_page;
1705 } while (bh != head);
1707 SetPageUptodate(page);
1708 end_page_writeback(page);
1710 * The page and buffer_heads can be released at any time from
1713 wbc->pages_skipped++; /* We didn't write this page */
1719 * ENOSPC, or some other error. We may already have added some
1720 * blocks to the file, so we need to write these out to avoid
1721 * exposing stale data.
1722 * The page is currently locked and not marked for writeback
1725 /* Recovery: lock and submit the mapped buffers */
1727 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1729 mark_buffer_async_write(bh);
1732 * The buffer may have been set dirty during
1733 * attachment to a dirty page.
1735 clear_buffer_dirty(bh);
1737 } while ((bh = bh->b_this_page) != head);
1739 BUG_ON(PageWriteback(page));
1740 set_page_writeback(page);
1743 struct buffer_head *next = bh->b_this_page;
1744 if (buffer_async_write(bh)) {
1745 clear_buffer_dirty(bh);
1746 submit_bh(WRITE, bh);
1750 } while (bh != head);
1754 static int __block_prepare_write(struct inode *inode, struct page *page,
1755 unsigned from, unsigned to, get_block_t *get_block)
1757 unsigned block_start, block_end;
1760 unsigned blocksize, bbits;
1761 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1763 BUG_ON(!PageLocked(page));
1764 BUG_ON(from > PAGE_CACHE_SIZE);
1765 BUG_ON(to > PAGE_CACHE_SIZE);
1768 blocksize = 1 << inode->i_blkbits;
1769 if (!page_has_buffers(page))
1770 create_empty_buffers(page, blocksize, 0);
1771 head = page_buffers(page);
1773 bbits = inode->i_blkbits;
1774 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1776 for(bh = head, block_start = 0; bh != head || !block_start;
1777 block++, block_start=block_end, bh = bh->b_this_page) {
1778 block_end = block_start + blocksize;
1779 if (block_end <= from || block_start >= to) {
1780 if (PageUptodate(page)) {
1781 if (!buffer_uptodate(bh))
1782 set_buffer_uptodate(bh);
1787 clear_buffer_new(bh);
1788 if (!buffer_mapped(bh)) {
1789 WARN_ON(bh->b_size != blocksize);
1790 err = get_block(inode, block, bh, 1);
1793 if (buffer_new(bh)) {
1794 unmap_underlying_metadata(bh->b_bdev,
1796 if (PageUptodate(page)) {
1797 set_buffer_uptodate(bh);
1800 if (block_end > to || block_start < from) {
1803 kaddr = kmap_atomic(page, KM_USER0);
1807 if (block_start < from)
1808 memset(kaddr+block_start,
1809 0, from-block_start);
1810 flush_dcache_page(page);
1811 kunmap_atomic(kaddr, KM_USER0);
1816 if (PageUptodate(page)) {
1817 if (!buffer_uptodate(bh))
1818 set_buffer_uptodate(bh);
1821 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1822 (block_start < from || block_end > to)) {
1823 ll_rw_block(READ, 1, &bh);
1828 * If we issued read requests - let them complete.
1830 while(wait_bh > wait) {
1831 wait_on_buffer(*--wait_bh);
1832 if (!buffer_uptodate(*wait_bh))
1839 clear_buffer_new(bh);
1840 } while ((bh = bh->b_this_page) != head);
1845 * Zero out any newly allocated blocks to avoid exposing stale
1846 * data. If BH_New is set, we know that the block was newly
1847 * allocated in the above loop.
1852 block_end = block_start+blocksize;
1853 if (block_end <= from)
1855 if (block_start >= to)
1857 if (buffer_new(bh)) {
1860 clear_buffer_new(bh);
1861 kaddr = kmap_atomic(page, KM_USER0);
1862 memset(kaddr+block_start, 0, bh->b_size);
1863 flush_dcache_page(page);
1864 kunmap_atomic(kaddr, KM_USER0);
1865 set_buffer_uptodate(bh);
1866 mark_buffer_dirty(bh);
1869 block_start = block_end;
1870 bh = bh->b_this_page;
1871 } while (bh != head);
1875 static int __block_commit_write(struct inode *inode, struct page *page,
1876 unsigned from, unsigned to)
1878 unsigned block_start, block_end;
1881 struct buffer_head *bh, *head;
1883 blocksize = 1 << inode->i_blkbits;
1885 for(bh = head = page_buffers(page), block_start = 0;
1886 bh != head || !block_start;
1887 block_start=block_end, bh = bh->b_this_page) {
1888 block_end = block_start + blocksize;
1889 if (block_end <= from || block_start >= to) {
1890 if (!buffer_uptodate(bh))
1893 set_buffer_uptodate(bh);
1894 mark_buffer_dirty(bh);
1899 * If this is a partial write which happened to make all buffers
1900 * uptodate then we can optimize away a bogus readpage() for
1901 * the next read(). Here we 'discover' whether the page went
1902 * uptodate as a result of this (potentially partial) write.
1905 SetPageUptodate(page);
1910 * Generic "read page" function for block devices that have the normal
1911 * get_block functionality. This is most of the block device filesystems.
1912 * Reads the page asynchronously --- the unlock_buffer() and
1913 * set/clear_buffer_uptodate() functions propagate buffer state into the
1914 * page struct once IO has completed.
1916 int block_read_full_page(struct page *page, get_block_t *get_block)
1918 struct inode *inode = page->mapping->host;
1919 sector_t iblock, lblock;
1920 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1921 unsigned int blocksize;
1923 int fully_mapped = 1;
1925 BUG_ON(!PageLocked(page));
1926 blocksize = 1 << inode->i_blkbits;
1927 if (!page_has_buffers(page))
1928 create_empty_buffers(page, blocksize, 0);
1929 head = page_buffers(page);
1931 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1932 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1938 if (buffer_uptodate(bh))
1941 if (!buffer_mapped(bh)) {
1945 if (iblock < lblock) {
1946 WARN_ON(bh->b_size != blocksize);
1947 err = get_block(inode, iblock, bh, 0);
1951 if (!buffer_mapped(bh)) {
1952 void *kaddr = kmap_atomic(page, KM_USER0);
1953 memset(kaddr + i * blocksize, 0, blocksize);
1954 flush_dcache_page(page);
1955 kunmap_atomic(kaddr, KM_USER0);
1957 set_buffer_uptodate(bh);
1961 * get_block() might have updated the buffer
1964 if (buffer_uptodate(bh))
1968 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1971 SetPageMappedToDisk(page);
1975 * All buffers are uptodate - we can set the page uptodate
1976 * as well. But not if get_block() returned an error.
1978 if (!PageError(page))
1979 SetPageUptodate(page);
1984 /* Stage two: lock the buffers */
1985 for (i = 0; i < nr; i++) {
1988 mark_buffer_async_read(bh);
1992 * Stage 3: start the IO. Check for uptodateness
1993 * inside the buffer lock in case another process reading
1994 * the underlying blockdev brought it uptodate (the sct fix).
1996 for (i = 0; i < nr; i++) {
1998 if (buffer_uptodate(bh))
1999 end_buffer_async_read(bh, 1);
2001 submit_bh(READ, bh);
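/*
 * Illustrative sketch (not from the original file): how a filesystem wires its
 * ->readpage to block_read_full_page().  example_get_block() below is a
 * hypothetical identity mapping for illustration only; a real get_block()
 * consults the filesystem's block map (and allocates when @create is set).
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* pretend file block == disk block */
	return 0;
}

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}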
2006 /* utility function for filesystems that need to do work on expanding
2007 * truncates. Uses prepare/commit_write to allow the filesystem to
2008 * deal with the hole.
2010 static int __generic_cont_expand(struct inode *inode, loff_t size,
2011 pgoff_t index, unsigned int offset)
2013 struct address_space *mapping = inode->i_mapping;
2015 unsigned long limit;
2019 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2020 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2021 send_sig(SIGXFSZ, current, 0);
2024 if (size > inode->i_sb->s_maxbytes)
2028 page = grab_cache_page(mapping, index);
2031 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2034 * ->prepare_write() may have instantiated a few blocks
2035 * outside i_size. Trim these off again.
2038 page_cache_release(page);
2039 vmtruncate(inode, inode->i_size);
2043 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2046 page_cache_release(page);
2053 int generic_cont_expand(struct inode *inode, loff_t size)
2056 unsigned int offset;
2058 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2060 /* ugh. in prepare/commit_write, if from==to==start of block, we
2061 ** skip the prepare. make sure we never send an offset for the start
2064 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2065 /* caller must handle this extra byte. */
2068 index = size >> PAGE_CACHE_SHIFT;
2070 return __generic_cont_expand(inode, size, index, offset);
2073 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2075 loff_t pos = size - 1;
2076 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2077 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2079 /* prepare/commit_write can handle even if from==to==start of block. */
2080 return __generic_cont_expand(inode, size, index, offset);
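/*
 * Illustrative sketch (not from the original file): extending a file from a
 * filesystem's ->setattr path by letting generic_cont_expand_simple() write a
 * zero byte at the new EOF (fat does essentially this).
 * example_expand() is a hypothetical name.
 */
static int example_expand(struct inode *inode, loff_t new_size)
{
	if (new_size > inode->i_size)
		return generic_cont_expand_simple(inode, new_size);
	return 0;
}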
2084 * For moronic filesystems that do not allow holes in files.
2085 * We may have to extend the file.
2088 int cont_prepare_write(struct page *page, unsigned offset,
2089 unsigned to, get_block_t *get_block, loff_t *bytes)
2091 struct address_space *mapping = page->mapping;
2092 struct inode *inode = mapping->host;
2093 struct page *new_page;
2097 unsigned blocksize = 1 << inode->i_blkbits;
2100 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2102 new_page = grab_cache_page(mapping, pgpos);
2105 /* we might sleep */
2106 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2107 unlock_page(new_page);
2108 page_cache_release(new_page);
2111 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2112 if (zerofrom & (blocksize-1)) {
2113 *bytes |= (blocksize-1);
2116 status = __block_prepare_write(inode, new_page, zerofrom,
2117 PAGE_CACHE_SIZE, get_block);
2120 kaddr = kmap_atomic(new_page, KM_USER0);
2121 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2122 flush_dcache_page(new_page);
2123 kunmap_atomic(kaddr, KM_USER0);
2124 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2125 unlock_page(new_page);
2126 page_cache_release(new_page);
2129 if (page->index < pgpos) {
2130 /* completely inside the area */
2133 /* page covers the boundary, find the boundary offset */
2134 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2136 /* if we will expand the thing last block will be filled */
2137 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2138 *bytes |= (blocksize-1);
2142 /* starting below the boundary? Nothing to zero out */
2143 if (offset <= zerofrom)
2146 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2149 if (zerofrom < offset) {
2150 kaddr = kmap_atomic(page, KM_USER0);
2151 memset(kaddr+zerofrom, 0, offset-zerofrom);
2152 flush_dcache_page(page);
2153 kunmap_atomic(kaddr, KM_USER0);
2154 __block_commit_write(inode, page, zerofrom, offset);
2158 ClearPageUptodate(page);
2162 ClearPageUptodate(new_page);
2163 unlock_page(new_page);
2164 page_cache_release(new_page);
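/*
 * Illustrative sketch (not from the original file): a no-holes filesystem
 * points its ->prepare_write at cont_prepare_write(), passing a per-inode
 * cursor of how far the file has been zero-filled (fat's mmu_private plays
 * this role).  struct example_inode_info, EXAMPLE_I() and i_zeroed_up_to are
 * hypothetical; example_get_block() is the sketch given earlier.
 */
struct example_inode_info {
	loff_t		i_zeroed_up_to;		/* file is zero-filled up to here */
	struct inode	vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

static int example_cont_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, example_get_block,
				  &EXAMPLE_I(inode)->i_zeroed_up_to);
}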
2169 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2170 get_block_t *get_block)
2172 struct inode *inode = page->mapping->host;
2173 int err = __block_prepare_write(inode, page, from, to, get_block);
2175 ClearPageUptodate(page);
2179 int block_commit_write(struct page *page, unsigned from, unsigned to)
2181 struct inode *inode = page->mapping->host;
2182 __block_commit_write(inode,page,from,to);
2186 int generic_commit_write(struct file *file, struct page *page,
2187 unsigned from, unsigned to)
2189 struct inode *inode = page->mapping->host;
2190 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2191 __block_commit_write(inode,page,from,to);
2193 * No need to use i_size_read() here, the i_size
2194 * cannot change under us because we hold i_mutex.
2196 if (pos > inode->i_size) {
2197 i_size_write(inode, pos);
2198 mark_inode_dirty(inode);
2205 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2206 * immediately, while under the page lock. So it needs a special end_io
2207 * handler which does not touch the bh after unlocking it.
2209 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2210 * a race there is benign: unlock_buffer() only uses the bh's address for
2211 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2214 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2217 set_buffer_uptodate(bh);
2219 /* This happens, due to failed READA attempts. */
2220 clear_buffer_uptodate(bh);
2226 * On entry, the page is fully not uptodate.
2227 * On exit the page is fully uptodate in the areas outside (from,to)
2229 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2230 get_block_t *get_block)
2232 struct inode *inode = page->mapping->host;
2233 const unsigned blkbits = inode->i_blkbits;
2234 const unsigned blocksize = 1 << blkbits;
2235 struct buffer_head map_bh;
2236 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2237 unsigned block_in_page;
2238 unsigned block_start;
2239 sector_t block_in_file;
2244 int is_mapped_to_disk = 1;
2247 if (PageMappedToDisk(page))
2250 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2251 map_bh.b_page = page;
2254 * We loop across all blocks in the page, whether or not they are
2255 * part of the affected region. This is so we can discover if the
2256 * page is fully mapped-to-disk.
2258 for (block_start = 0, block_in_page = 0;
2259 block_start < PAGE_CACHE_SIZE;
2260 block_in_page++, block_start += blocksize) {
2261 unsigned block_end = block_start + blocksize;
2266 if (block_start >= to)
2268 map_bh.b_size = blocksize;
2269 ret = get_block(inode, block_in_file + block_in_page,
2273 if (!buffer_mapped(&map_bh))
2274 is_mapped_to_disk = 0;
2275 if (buffer_new(&map_bh))
2276 unmap_underlying_metadata(map_bh.b_bdev,
2278 if (PageUptodate(page))
2280 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2281 kaddr = kmap_atomic(page, KM_USER0);
2282 if (block_start < from) {
2283 memset(kaddr+block_start, 0, from-block_start);
2286 if (block_end > to) {
2287 memset(kaddr + to, 0, block_end - to);
2290 flush_dcache_page(page);
2291 kunmap_atomic(kaddr, KM_USER0);
2294 if (buffer_uptodate(&map_bh))
2295 continue; /* reiserfs does this */
2296 if (block_start < from || block_end > to) {
2297 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2303 bh->b_state = map_bh.b_state;
2304 atomic_set(&bh->b_count, 0);
2305 bh->b_this_page = NULL;
2307 bh->b_blocknr = map_bh.b_blocknr;
2308 bh->b_size = blocksize;
2309 bh->b_data = (char *)(long)block_start;
2310 bh->b_bdev = map_bh.b_bdev;
2311 bh->b_private = NULL;
2312 read_bh[nr_reads++] = bh;
2317 struct buffer_head *bh;
2320 * The page is locked, so these buffers are protected from
2321 * any VM or truncate activity. Hence we don't need to care
2322 * for the buffer_head refcounts.
2324 for (i = 0; i < nr_reads; i++) {
2327 bh->b_end_io = end_buffer_read_nobh;
2328 submit_bh(READ, bh);
2330 for (i = 0; i < nr_reads; i++) {
2333 if (!buffer_uptodate(bh))
2335 free_buffer_head(bh);
2342 if (is_mapped_to_disk)
2343 SetPageMappedToDisk(page);
2344 SetPageUptodate(page);
2347 * Setting the page dirty here isn't necessary for the prepare_write
2348 * function - commit_write will do that. But if/when this function is
2349 * used within the pagefault handler to ensure that all mmapped pages
2350 * have backing space in the filesystem, we will need to dirty the page
2351 * if its contents were altered.
2354 set_page_dirty(page);
2359 for (i = 0; i < nr_reads; i++) {
2361 free_buffer_head(read_bh[i]);
2365 * Error recovery is pretty slack. Clear the page and mark it dirty
2366 * so we'll later zero out any blocks which _were_ allocated.
2368 kaddr = kmap_atomic(page, KM_USER0);
2369 memset(kaddr, 0, PAGE_CACHE_SIZE);
2370 flush_dcache_page(page);
2371 kunmap_atomic(kaddr, KM_USER0);
2372 SetPageUptodate(page);
2373 set_page_dirty(page);
2376 EXPORT_SYMBOL(nobh_prepare_write);
2378 int nobh_commit_write(struct file *file, struct page *page,
2379 unsigned from, unsigned to)
2381 struct inode *inode = page->mapping->host;
2382 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2384 set_page_dirty(page);
2385 if (pos > inode->i_size) {
2386 i_size_write(inode, pos);
2387 mark_inode_dirty(inode);
2391 EXPORT_SYMBOL(nobh_commit_write);
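/*
 * Illustrative sketch: a filesystem that wants to avoid attaching
 * buffer_heads for ordinary writes can use the nobh helpers in its
 * address_space_operations (ext2's "nobh" mode is wired up roughly
 * along these lines).  The myfs_* names are assumed, hypothetical
 * identifiers.
 */
#if 0	/* example only, not compiled */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
	/* .writepage would normally point at a nobh_writepage() wrapper */
};
#endif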
2394  * nobh_writepage() - based on block_write_full_page() except
2395 * that it tries to operate without attaching bufferheads to
2398 int nobh_writepage(struct page *page, get_block_t *get_block,
2399 struct writeback_control *wbc)
2401 struct inode * const inode = page->mapping->host;
2402 loff_t i_size = i_size_read(inode);
2403 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2408 /* Is the page fully inside i_size? */
2409 if (page->index < end_index)
2412 /* Is the page fully outside i_size? (truncate in progress) */
2413 offset = i_size & (PAGE_CACHE_SIZE-1);
2414 if (page->index >= end_index+1 || !offset) {
2416 * The page may have dirty, unmapped buffers. For example,
2417 * they may have been added in ext3_writepage(). Make them
2418 * freeable here, so the page does not leak.
2421		/* Not really sure about this - do we need this? */
2422 if (page->mapping->a_ops->invalidatepage)
2423 page->mapping->a_ops->invalidatepage(page, offset);
2426 return 0; /* don't care */
2430 * The page straddles i_size. It must be zeroed out on each and every
2431 * writepage invocation because it may be mmapped. "A file is mapped
2432 * in multiples of the page size. For a file that is not a multiple of
2433 * the page size, the remaining memory is zeroed when mapped, and
2434 * writes to that region are not written out to the file."
2436 kaddr = kmap_atomic(page, KM_USER0);
2437 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2438 flush_dcache_page(page);
2439 kunmap_atomic(kaddr, KM_USER0);
2441 ret = mpage_writepage(page, get_block, wbc);
2443 ret = __block_write_full_page(inode, page, get_block, wbc);
2446 EXPORT_SYMBOL(nobh_writepage);
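/*
 * Illustrative only: the usual way to use nobh_writepage() is through a
 * thin per-filesystem ->writepage wrapper that supplies the filesystem's
 * get_block (myfs_get_block is an assumed name).
 */
#if 0	/* example only, not compiled */
static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}
#endif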
2449 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2451 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2453 struct inode *inode = mapping->host;
2454 unsigned blocksize = 1 << inode->i_blkbits;
2455 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2456 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2459 const struct address_space_operations *a_ops = mapping->a_ops;
2463 if ((offset & (blocksize - 1)) == 0)
2467 page = grab_cache_page(mapping, index);
2471 to = (offset + blocksize) & ~(blocksize - 1);
2472 ret = a_ops->prepare_write(NULL, page, offset, to);
2474 kaddr = kmap_atomic(page, KM_USER0);
2475 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2476 flush_dcache_page(page);
2477 kunmap_atomic(kaddr, KM_USER0);
2478 set_page_dirty(page);
2481 page_cache_release(page);
2485 EXPORT_SYMBOL(nobh_truncate_page);
2487 int block_truncate_page(struct address_space *mapping,
2488 loff_t from, get_block_t *get_block)
2490 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2491 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2494 unsigned length, pos;
2495 struct inode *inode = mapping->host;
2497 struct buffer_head *bh;
2501 blocksize = 1 << inode->i_blkbits;
2502 length = offset & (blocksize - 1);
2504 /* Block boundary? Nothing to do */
2508 length = blocksize - length;
2509 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2511 page = grab_cache_page(mapping, index);
2516 if (!page_has_buffers(page))
2517 create_empty_buffers(page, blocksize, 0);
2519 /* Find the buffer that contains "offset" */
2520 bh = page_buffers(page);
2522 while (offset >= pos) {
2523 bh = bh->b_this_page;
2529 if (!buffer_mapped(bh)) {
2530 WARN_ON(bh->b_size != blocksize);
2531 err = get_block(inode, iblock, bh, 0);
2534 /* unmapped? It's a hole - nothing to do */
2535 if (!buffer_mapped(bh))
2539 /* Ok, it's mapped. Make sure it's up-to-date */
2540 if (PageUptodate(page))
2541 set_buffer_uptodate(bh);
2543 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2545 ll_rw_block(READ, 1, &bh);
2547 /* Uhhuh. Read error. Complain and punt. */
2548 if (!buffer_uptodate(bh))
2552 kaddr = kmap_atomic(page, KM_USER0);
2553 memset(kaddr + offset, 0, length);
2554 flush_dcache_page(page);
2555 kunmap_atomic(kaddr, KM_USER0);
2557 mark_buffer_dirty(bh);
2562 page_cache_release(page);
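/*
 * Illustrative sketch (assumed myfs_* names): a filesystem's truncate
 * path normally zeroes the partial block beyond the new EOF with
 * block_truncate_page() before it frees the underlying blocks.
 */
#if 0	/* example only, not compiled */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... then release the on-disk blocks past the new i_size ... */
}
#endif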
2568 * The generic ->writepage function for buffer-backed address_spaces
2570 int block_write_full_page(struct page *page, get_block_t *get_block,
2571 struct writeback_control *wbc)
2573 struct inode * const inode = page->mapping->host;
2574 loff_t i_size = i_size_read(inode);
2575 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2579 /* Is the page fully inside i_size? */
2580 if (page->index < end_index)
2581 return __block_write_full_page(inode, page, get_block, wbc);
2583 /* Is the page fully outside i_size? (truncate in progress) */
2584 offset = i_size & (PAGE_CACHE_SIZE-1);
2585 if (page->index >= end_index+1 || !offset) {
2587 * The page may have dirty, unmapped buffers. For example,
2588 * they may have been added in ext3_writepage(). Make them
2589 * freeable here, so the page does not leak.
2591 do_invalidatepage(page, 0);
2593 return 0; /* don't care */
2597 * The page straddles i_size. It must be zeroed out on each and every
2598  * writepage invocation because it may be mmapped. "A file is mapped
2599 * in multiples of the page size. For a file that is not a multiple of
2600 * the page size, the remaining memory is zeroed when mapped, and
2601 * writes to that region are not written out to the file."
2603 kaddr = kmap_atomic(page, KM_USER0);
2604 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2605 flush_dcache_page(page);
2606 kunmap_atomic(kaddr, KM_USER0);
2607 return __block_write_full_page(inode, page, get_block, wbc);
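/*
 * Illustrative only: for a conventional buffer-backed filesystem,
 * ->writepage is usually just this shim around block_write_full_page()
 * (myfs_get_block is an assumed name, not defined in this file).
 */
#if 0	/* example only, not compiled */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif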
2610 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2611 get_block_t *get_block)
2613 struct buffer_head tmp;
2614 struct inode *inode = mapping->host;
2617 tmp.b_size = 1 << inode->i_blkbits;
2618 get_block(inode, block, &tmp, 0);
2619 return tmp.b_blocknr;
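/*
 * Illustrative only: ->bmap is typically a one-line wrapper passing the
 * filesystem's get_block (assumed name) to generic_block_bmap().
 */
#if 0	/* example only, not compiled */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif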
2622 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2624 struct buffer_head *bh = bio->bi_private;
2629 if (err == -EOPNOTSUPP) {
2630 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2631 set_bit(BH_Eopnotsupp, &bh->b_state);
2634 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2639 int submit_bh(int rw, struct buffer_head * bh)
2644 BUG_ON(!buffer_locked(bh));
2645 BUG_ON(!buffer_mapped(bh));
2646 BUG_ON(!bh->b_end_io);
2648 if (buffer_ordered(bh) && (rw == WRITE))
2652	 * Only clear out a write error when rewriting; should this
2653	 * include WRITE_SYNC as well?
2655 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2656 clear_buffer_write_io_error(bh);
2659 * from here on down, it's all bio -- do the initial mapping,
2660 * submit_bio -> generic_make_request may further map this bio around
2662 bio = bio_alloc(GFP_NOIO, 1);
2664 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2665 bio->bi_bdev = bh->b_bdev;
2666 bio->bi_io_vec[0].bv_page = bh->b_page;
2667 bio->bi_io_vec[0].bv_len = bh->b_size;
2668 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2672 bio->bi_size = bh->b_size;
2674 bio->bi_end_io = end_bio_bh_io_sync;
2675 bio->bi_private = bh;
2678 submit_bio(rw, bio);
2680 if (bio_flagged(bio, BIO_EOPNOTSUPP))
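/*
 * Illustrative sketch of the usual synchronous read built on submit_bh():
 * lock the buffer, install a completion handler, submit, then wait.  This
 * is essentially the pattern __bread()-style helpers follow; the function
 * below is hypothetical, not defined in this file.
 */
#if 0	/* example only, not compiled */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* end_buffer_read_sync drops this ref */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif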
2688 * ll_rw_block: low-level access to block devices (DEPRECATED)
2689 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2690 * @nr: number of &struct buffer_heads in the array
2691 * @bhs: array of pointers to &struct buffer_head
2693 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2694 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2695  * option, %SWRITE, is like %WRITE except that it makes sure that the *current*
2696  * data in the buffers is sent to disk. The fourth %READA option is described
2697  * in the documentation for generic_make_request(), which ll_rw_block() calls.
2699 * This function drops any buffer that it cannot get a lock on (with the
2700 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2701 * clean when doing a write request, and any buffer that appears to be
2702  * up-to-date when doing a read request. Further, it marks as clean the buffers that
2703 * are processed for writing (the buffer cache won't assume that they are
2704 * actually clean until the buffer gets unlocked).
2706  * ll_rw_block sets b_end_io to a simple completion handler that marks
2707  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes any waiters.
2710 * All of the buffers must be for the same device, and must also be a
2711 * multiple of the current approved size for the device.
2713 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2717 for (i = 0; i < nr; i++) {
2718 struct buffer_head *bh = bhs[i];
2722 else if (test_set_buffer_locked(bh))
2725 if (rw == WRITE || rw == SWRITE) {
2726 if (test_clear_buffer_dirty(bh)) {
2727 bh->b_end_io = end_buffer_write_sync;
2729 submit_bh(WRITE, bh);
2733 if (!buffer_uptodate(bh)) {
2734 bh->b_end_io = end_buffer_read_sync;
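/*
 * Illustrative batch read pattern (assumed myfs_* name): start I/O on a
 * set of already-mapped buffers with ll_rw_block() and then wait for each
 * one, checking for errors.
 */
#if 0	/* example only, not compiled */
static int myfs_read_bhs(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif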
2745 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2746 * and then start new I/O and then wait upon it. The caller must have a ref on
2749 int sync_dirty_buffer(struct buffer_head *bh)
2753 WARN_ON(atomic_read(&bh->b_count) < 1);
2755 if (test_clear_buffer_dirty(bh)) {
2757 bh->b_end_io = end_buffer_write_sync;
2758 ret = submit_bh(WRITE, bh);
2760 if (buffer_eopnotsupp(bh)) {
2761 clear_buffer_eopnotsupp(bh);
2764 if (!ret && !buffer_uptodate(bh))
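/*
 * Illustrative sketch (assumed names, hypothetical block number): the
 * common "modify a metadata block and push it out now" sequence built on
 * sb_bread(), mark_buffer_dirty() and sync_dirty_buffer().
 */
#if 0	/* example only, not compiled */
static int myfs_update_super(struct super_block *sb)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, MYFS_SB_BLOCK);	/* assumed block number */
	if (!bh)
		return -EIO;
	/* ... modify the on-disk superblock image in bh->b_data ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}
#endif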
2773 * try_to_free_buffers() checks if all the buffers on this particular page
2774 * are unused, and releases them if so.
2776 * Exclusion against try_to_free_buffers may be obtained by either
2777 * locking the page or by holding its mapping's private_lock.
2779 * If the page is dirty but all the buffers are clean then we need to
2780 * be sure to mark the page clean as well. This is because the page
2781 * may be against a block device, and a later reattachment of buffers
2782  * to a dirty page will set *all* buffers dirty, which would corrupt
2783 * filesystem data on the same device.
2785 * The same applies to regular filesystem pages: if all the buffers are
2786 * clean then we set the page clean and proceed. To do that, we require
2787  * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
2790 * try_to_free_buffers() is non-blocking.
2792 static inline int buffer_busy(struct buffer_head *bh)
2794 return atomic_read(&bh->b_count) |
2795 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2799 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2801 struct buffer_head *head = page_buffers(page);
2802 struct buffer_head *bh;
2806 if (buffer_write_io_error(bh) && page->mapping)
2807 set_bit(AS_EIO, &page->mapping->flags);
2808 if (buffer_busy(bh))
2810 bh = bh->b_this_page;
2811 } while (bh != head);
2814 struct buffer_head *next = bh->b_this_page;
2816 if (!list_empty(&bh->b_assoc_buffers))
2817 __remove_assoc_queue(bh);
2819 } while (bh != head);
2820 *buffers_to_free = head;
2821 __clear_page_buffers(page);
2827 int try_to_free_buffers(struct page *page)
2829 struct address_space * const mapping = page->mapping;
2830 struct buffer_head *buffers_to_free = NULL;
2833 BUG_ON(!PageLocked(page));
2834 if (PageWriteback(page))
2837 if (mapping == NULL) { /* can this still happen? */
2838 ret = drop_buffers(page, &buffers_to_free);
2842 spin_lock(&mapping->private_lock);
2843 ret = drop_buffers(page, &buffers_to_free);
2844 spin_unlock(&mapping->private_lock);
2847 * If the filesystem writes its buffers by hand (eg ext3)
2848 * then we can have clean buffers against a dirty page. We
2849 * clean the page here; otherwise later reattachment of buffers
2850 * could encounter a non-uptodate page, which is unresolvable.
2851 * This only applies in the rare case where try_to_free_buffers
2852 * succeeds but the page is not freed.
2854 clear_page_dirty(page);
2857 if (buffers_to_free) {
2858 struct buffer_head *bh = buffers_to_free;
2861 struct buffer_head *next = bh->b_this_page;
2862 free_buffer_head(bh);
2864 } while (bh != buffers_to_free);
2868 EXPORT_SYMBOL(try_to_free_buffers);
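/*
 * Illustrative only: when an address_space provides no ->releasepage,
 * try_to_release_page() normally falls back to try_to_free_buffers()
 * directly; a filesystem that needs no extra work but wants an explicit
 * hook can use a wrapper like the hypothetical one below.
 */
#if 0	/* example only, not compiled */
static int myfs_releasepage(struct page *page, gfp_t gfp)
{
	/* try_to_free_buffers() refuses if writeback or busy buffers remain */
	return try_to_free_buffers(page);
}
#endif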
2870 void block_sync_page(struct page *page)
2872 struct address_space *mapping;
2875 mapping = page_mapping(page);
2877 blk_run_backing_dev(mapping->backing_dev_info, page);
2881 * There are no bdflush tunables left. But distributions are
2882 * still running obsolete flush daemons, so we terminate them here.
2884 * Use of bdflush() is deprecated and will be removed in a future kernel.
2885 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2887 asmlinkage long sys_bdflush(int func, long data)
2889 static int msg_count;
2891 if (!capable(CAP_SYS_ADMIN))
2894 if (msg_count < 5) {
2897 "warning: process `%s' used the obsolete bdflush"
2898 " system call\n", current->comm);
2899 printk(KERN_INFO "Fix your initscripts?\n");
2908 * Buffer-head allocation
2910 static struct kmem_cache *bh_cachep;
2913 * Once the number of bh's in the machine exceeds this level, we start
2914 * stripping them in writeback.
2916 static int max_buffer_heads;
2918 int buffer_heads_over_limit;
2920 struct bh_accounting {
2921 int nr; /* Number of live bh's */
2922 int ratelimit; /* Limit cacheline bouncing */
2925 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2927 static void recalc_bh_state(void)
2932 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2934 __get_cpu_var(bh_accounting).ratelimit = 0;
2935 for_each_online_cpu(i)
2936 tot += per_cpu(bh_accounting, i).nr;
2937 buffer_heads_over_limit = (tot > max_buffer_heads);
2940 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2942 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2944 get_cpu_var(bh_accounting).nr++;
2946 put_cpu_var(bh_accounting);
2950 EXPORT_SYMBOL(alloc_buffer_head);
2952 void free_buffer_head(struct buffer_head *bh)
2954 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2955 kmem_cache_free(bh_cachep, bh);
2956 get_cpu_var(bh_accounting).nr--;
2958 put_cpu_var(bh_accounting);
2960 EXPORT_SYMBOL(free_buffer_head);
2963 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
2965 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2966 SLAB_CTOR_CONSTRUCTOR) {
2967 struct buffer_head * bh = (struct buffer_head *)data;
2969 memset(bh, 0, sizeof(*bh));
2970 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2974 static void buffer_exit_cpu(int cpu)
2977 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2979 for (i = 0; i < BH_LRU_SIZE; i++) {
2983 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2984 per_cpu(bh_accounting, cpu).nr = 0;
2985 put_cpu_var(bh_accounting);
2988 static int buffer_cpu_notify(struct notifier_block *self,
2989 unsigned long action, void *hcpu)
2991 if (action == CPU_DEAD)
2992 buffer_exit_cpu((unsigned long)hcpu);
2996 void __init buffer_init(void)
3000 bh_cachep = kmem_cache_create("buffer_head",
3001 sizeof(struct buffer_head), 0,
3002 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3008 * Limit the bh occupancy to 10% of ZONE_NORMAL
3010 nrpages = (nr_free_buffer_pages() * 10) / 100;
3011 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
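	/*
	 * Illustrative arithmetic only (example figures, not measured):
	 * with 4k pages, roughly 256k free buffer pages (about 1GB of
	 * ZONE_NORMAL) and a buffer_head of the order of 50-100 bytes,
	 * nrpages is ~26k and max_buffer_heads works out to roughly one
	 * to two million buffer heads.
	 */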
3012 hotcpu_notifier(buffer_cpu_notify, 0);
3015 EXPORT_SYMBOL(__bforget);
3016 EXPORT_SYMBOL(__brelse);
3017 EXPORT_SYMBOL(__wait_on_buffer);
3018 EXPORT_SYMBOL(block_commit_write);
3019 EXPORT_SYMBOL(block_prepare_write);
3020 EXPORT_SYMBOL(block_read_full_page);
3021 EXPORT_SYMBOL(block_sync_page);
3022 EXPORT_SYMBOL(block_truncate_page);
3023 EXPORT_SYMBOL(block_write_full_page);
3024 EXPORT_SYMBOL(cont_prepare_write);
3025 EXPORT_SYMBOL(end_buffer_read_sync);
3026 EXPORT_SYMBOL(end_buffer_write_sync);
3027 EXPORT_SYMBOL(file_fsync);
3028 EXPORT_SYMBOL(fsync_bdev);
3029 EXPORT_SYMBOL(generic_block_bmap);
3030 EXPORT_SYMBOL(generic_commit_write);
3031 EXPORT_SYMBOL(generic_cont_expand);
3032 EXPORT_SYMBOL(generic_cont_expand_simple);
3033 EXPORT_SYMBOL(init_buffer);
3034 EXPORT_SYMBOL(invalidate_bdev);
3035 EXPORT_SYMBOL(ll_rw_block);
3036 EXPORT_SYMBOL(mark_buffer_dirty);
3037 EXPORT_SYMBOL(submit_bh);
3038 EXPORT_SYMBOL(sync_dirty_buffer);
3039 EXPORT_SYMBOL(unlock_buffer);